Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/Makefile         |    3
-rw-r--r--  net/bluetooth/a2mp.c           |  568
-rw-r--r--  net/bluetooth/af_bluetooth.c   |   14
-rw-r--r--  net/bluetooth/bnep/core.c      |   21
-rw-r--r--  net/bluetooth/bnep/netdev.c    |   16
-rw-r--r--  net/bluetooth/bnep/sock.c      |   18
-rw-r--r--  net/bluetooth/hci_conn.c       |  143
-rw-r--r--  net/bluetooth/hci_core.c       |  265
-rw-r--r--  net/bluetooth/hci_event.c      |  479
-rw-r--r--  net/bluetooth/hci_sock.c       |   59
-rw-r--r--  net/bluetooth/hci_sysfs.c      |   99
-rw-r--r--  net/bluetooth/hidp/core.c      |   26
-rw-r--r--  net/bluetooth/hidp/sock.c      |   16
-rw-r--r--  net/bluetooth/l2cap_core.c     | 2248
-rw-r--r--  net/bluetooth/l2cap_sock.c     |  130
-rw-r--r--  net/bluetooth/lib.c            |    7
-rw-r--r--  net/bluetooth/mgmt.c           |  131
-rw-r--r--  net/bluetooth/rfcomm/core.c    |   32
-rw-r--r--  net/bluetooth/rfcomm/sock.c    |   21
-rw-r--r--  net/bluetooth/rfcomm/tty.c     |    9
-rw-r--r--  net/bluetooth/sco.c            |   43
-rw-r--r--  net/bluetooth/smp.c            |    7
22 files changed, 2691 insertions(+), 1664 deletions(-)
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f53..fa6d94a4602a 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
 obj-$(CONFIG_BT_HIDP) += hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
-	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+	a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 000000000000..4ff0bf3ba9a5
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#include <net/bluetooth/bluetooth.h>
16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/l2cap.h>
18#include <net/bluetooth/a2mp.h>
19
20/* A2MP build & send command helper functions */
21static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
22{
23 struct a2mp_cmd *cmd;
24 int plen;
25
26 plen = sizeof(*cmd) + len;
27 cmd = kzalloc(plen, GFP_KERNEL);
28 if (!cmd)
29 return NULL;
30
31 cmd->code = code;
32 cmd->ident = ident;
33 cmd->len = cpu_to_le16(len);
34
35 memcpy(cmd->data, data, len);
36
37 return cmd;
38}
39
40static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
41 void *data)
42{
43 struct l2cap_chan *chan = mgr->a2mp_chan;
44 struct a2mp_cmd *cmd;
45 u16 total_len = len + sizeof(*cmd);
46 struct kvec iv;
47 struct msghdr msg;
48
49 cmd = __a2mp_build(code, ident, len, data);
50 if (!cmd)
51 return;
52
53 iv.iov_base = cmd;
54 iv.iov_len = total_len;
55
56 memset(&msg, 0, sizeof(msg));
57
58 msg.msg_iov = (struct iovec *) &iv;
59 msg.msg_iovlen = 1;
60
61 l2cap_chan_send(chan, &msg, total_len, 0);
62
63 kfree(cmd);
64}
65
66static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
67{
68 cl->id = 0;
69 cl->type = 0;
70 cl->status = 1;
71}
72
73/* hci_dev_list shall be locked */
74static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
75{
76 int i = 0;
77 struct hci_dev *hdev;
78
79 __a2mp_cl_bredr(cl);
80
81 list_for_each_entry(hdev, &hci_dev_list, list) {
82 /* Iterate through AMP controllers */
83 if (hdev->id == HCI_BREDR_ID)
84 continue;
85
86 /* Starting from second entry */
87 if (++i >= num_ctrl)
88 return;
89
90 cl[i].id = hdev->id;
91 cl[i].type = hdev->amp_type;
92 cl[i].status = hdev->amp_status;
93 }
94}
95
96/* Processing A2MP messages */
97static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
98 struct a2mp_cmd *hdr)
99{
100 struct a2mp_cmd_rej *rej = (void *) skb->data;
101
102 if (le16_to_cpu(hdr->len) < sizeof(*rej))
103 return -EINVAL;
104
105 BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
106
107 skb_pull(skb, sizeof(*rej));
108
109 return 0;
110}
111
112static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
113 struct a2mp_cmd *hdr)
114{
115 struct a2mp_discov_req *req = (void *) skb->data;
116 u16 len = le16_to_cpu(hdr->len);
117 struct a2mp_discov_rsp *rsp;
118 u16 ext_feat;
119 u8 num_ctrl;
120
121 if (len < sizeof(*req))
122 return -EINVAL;
123
124 skb_pull(skb, sizeof(*req));
125
126 ext_feat = le16_to_cpu(req->ext_feat);
127
128 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
129
130 /* check that packet is not broken for now */
131 while (ext_feat & A2MP_FEAT_EXT) {
132 if (len < sizeof(ext_feat))
133 return -EINVAL;
134
135 ext_feat = get_unaligned_le16(skb->data);
136 BT_DBG("efm 0x%4.4x", ext_feat);
137 len -= sizeof(ext_feat);
138 skb_pull(skb, sizeof(ext_feat));
139 }
140
141 read_lock(&hci_dev_list_lock);
142
143 num_ctrl = __hci_num_ctrl();
144 len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
145 rsp = kmalloc(len, GFP_ATOMIC);
146 if (!rsp) {
147 read_unlock(&hci_dev_list_lock);
148 return -ENOMEM;
149 }
150
151 rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
152 rsp->ext_feat = 0;
153
154 __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
155
156 read_unlock(&hci_dev_list_lock);
157
158 a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
159
160 kfree(rsp);
161 return 0;
162}
163
164static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
165 struct a2mp_cmd *hdr)
166{
167 struct a2mp_cl *cl = (void *) skb->data;
168
169 while (skb->len >= sizeof(*cl)) {
170 BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
171 cl->status);
172 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
173 }
174
175 /* TODO send A2MP_CHANGE_RSP */
176
177 return 0;
178}
179
180static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
181 struct a2mp_cmd *hdr)
182{
183 struct a2mp_info_req *req = (void *) skb->data;
184 struct a2mp_info_rsp rsp;
185 struct hci_dev *hdev;
186
187 if (le16_to_cpu(hdr->len) < sizeof(*req))
188 return -EINVAL;
189
190 BT_DBG("id %d", req->id);
191
192 rsp.id = req->id;
193 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
194
195 hdev = hci_dev_get(req->id);
196 if (hdev && hdev->amp_type != HCI_BREDR) {
197 rsp.status = 0;
198 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
199 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
200 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
201 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
202 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
203 }
204
205 if (hdev)
206 hci_dev_put(hdev);
207
208 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
209
210 skb_pull(skb, sizeof(*req));
211 return 0;
212}
213
214static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
215 struct a2mp_cmd *hdr)
216{
217 struct a2mp_amp_assoc_req *req = (void *) skb->data;
218 struct hci_dev *hdev;
219
220 if (le16_to_cpu(hdr->len) < sizeof(*req))
221 return -EINVAL;
222
223 BT_DBG("id %d", req->id);
224
225 hdev = hci_dev_get(req->id);
226 if (!hdev || hdev->amp_type == HCI_BREDR) {
227 struct a2mp_amp_assoc_rsp rsp;
228 rsp.id = req->id;
229 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
230
231 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
232 &rsp);
233 goto clean;
234 }
235
236 /* Placeholder for HCI Read AMP Assoc */
237
238clean:
239 if (hdev)
240 hci_dev_put(hdev);
241
242 skb_pull(skb, sizeof(*req));
243 return 0;
244}
245
246static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
247 struct a2mp_cmd *hdr)
248{
249 struct a2mp_physlink_req *req = (void *) skb->data;
250
251 struct a2mp_physlink_rsp rsp;
252 struct hci_dev *hdev;
253
254 if (le16_to_cpu(hdr->len) < sizeof(*req))
255 return -EINVAL;
256
257 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
258
259 rsp.local_id = req->remote_id;
260 rsp.remote_id = req->local_id;
261
262 hdev = hci_dev_get(req->remote_id);
263 if (!hdev || hdev->amp_type != HCI_AMP) {
264 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
265 goto send_rsp;
266 }
267
268 /* TODO process physlink create */
269
270 rsp.status = A2MP_STATUS_SUCCESS;
271
272send_rsp:
273 if (hdev)
274 hci_dev_put(hdev);
275
276 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
277 &rsp);
278
279 skb_pull(skb, le16_to_cpu(hdr->len));
280 return 0;
281}
282
283static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
284 struct a2mp_cmd *hdr)
285{
286 struct a2mp_physlink_req *req = (void *) skb->data;
287 struct a2mp_physlink_rsp rsp;
288 struct hci_dev *hdev;
289
290 if (le16_to_cpu(hdr->len) < sizeof(*req))
291 return -EINVAL;
292
293 BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
294
295 rsp.local_id = req->remote_id;
296 rsp.remote_id = req->local_id;
297 rsp.status = A2MP_STATUS_SUCCESS;
298
299 hdev = hci_dev_get(req->local_id);
300 if (!hdev) {
301 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
302 goto send_rsp;
303 }
304
305 /* TODO Disconnect Phys Link here */
306
307 hci_dev_put(hdev);
308
309send_rsp:
310 a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
311
312 skb_pull(skb, sizeof(*req));
313 return 0;
314}
315
316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
317 struct a2mp_cmd *hdr)
318{
319 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
320
321 skb_pull(skb, le16_to_cpu(hdr->len));
322 return 0;
323}
324
325/* Handle A2MP signalling */
326static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
327{
328 struct a2mp_cmd *hdr = (void *) skb->data;
329 struct amp_mgr *mgr = chan->data;
330 int err = 0;
331
332 amp_mgr_get(mgr);
333
334 while (skb->len >= sizeof(*hdr)) {
335 struct a2mp_cmd *hdr = (void *) skb->data;
336 u16 len = le16_to_cpu(hdr->len);
337
338 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
339
340 skb_pull(skb, sizeof(*hdr));
341
342 if (len > skb->len || !hdr->ident) {
343 err = -EINVAL;
344 break;
345 }
346
347 mgr->ident = hdr->ident;
348
349 switch (hdr->code) {
350 case A2MP_COMMAND_REJ:
351 a2mp_command_rej(mgr, skb, hdr);
352 break;
353
354 case A2MP_DISCOVER_REQ:
355 err = a2mp_discover_req(mgr, skb, hdr);
356 break;
357
358 case A2MP_CHANGE_NOTIFY:
359 err = a2mp_change_notify(mgr, skb, hdr);
360 break;
361
362 case A2MP_GETINFO_REQ:
363 err = a2mp_getinfo_req(mgr, skb, hdr);
364 break;
365
366 case A2MP_GETAMPASSOC_REQ:
367 err = a2mp_getampassoc_req(mgr, skb, hdr);
368 break;
369
370 case A2MP_CREATEPHYSLINK_REQ:
371 err = a2mp_createphyslink_req(mgr, skb, hdr);
372 break;
373
374 case A2MP_DISCONNPHYSLINK_REQ:
375 err = a2mp_discphyslink_req(mgr, skb, hdr);
376 break;
377
378 case A2MP_CHANGE_RSP:
379 case A2MP_DISCOVER_RSP:
380 case A2MP_GETINFO_RSP:
381 case A2MP_GETAMPASSOC_RSP:
382 case A2MP_CREATEPHYSLINK_RSP:
383 case A2MP_DISCONNPHYSLINK_RSP:
384 err = a2mp_cmd_rsp(mgr, skb, hdr);
385 break;
386
387 default:
388 BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
389 err = -EINVAL;
390 break;
391 }
392 }
393
394 if (err) {
395 struct a2mp_cmd_rej rej;
396 rej.reason = __constant_cpu_to_le16(0);
397
398 BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
399
400 a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
401 &rej);
402 }
403
404 /* Always free skb and return success error code to prevent
405 from sending L2CAP Disconnect over A2MP channel */
406 kfree_skb(skb);
407
408 amp_mgr_put(mgr);
409
410 return 0;
411}
412
413static void a2mp_chan_close_cb(struct l2cap_chan *chan)
414{
415 l2cap_chan_destroy(chan);
416}
417
418static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
419{
420 struct amp_mgr *mgr = chan->data;
421
422 if (!mgr)
423 return;
424
425 BT_DBG("chan %p state %s", chan, state_to_string(state));
426
427 chan->state = state;
428
429 switch (state) {
430 case BT_CLOSED:
431 if (mgr)
432 amp_mgr_put(mgr);
433 break;
434 }
435}
436
437static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
438 unsigned long len, int nb)
439{
440 return bt_skb_alloc(len, GFP_KERNEL);
441}
442
443static struct l2cap_ops a2mp_chan_ops = {
444 .name = "L2CAP A2MP channel",
445 .recv = a2mp_chan_recv_cb,
446 .close = a2mp_chan_close_cb,
447 .state_change = a2mp_chan_state_change_cb,
448 .alloc_skb = a2mp_chan_alloc_skb_cb,
449
450 /* Not implemented for A2MP */
451 .new_connection = l2cap_chan_no_new_connection,
452 .teardown = l2cap_chan_no_teardown,
453 .ready = l2cap_chan_no_ready,
454};
455
456static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
457{
458 struct l2cap_chan *chan;
459 int err;
460
461 chan = l2cap_chan_create();
462 if (!chan)
463 return NULL;
464
465 BT_DBG("chan %p", chan);
466
467 chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
468 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
469
470 chan->ops = &a2mp_chan_ops;
471
472 l2cap_chan_set_defaults(chan);
473 chan->remote_max_tx = chan->max_tx;
474 chan->remote_tx_win = chan->tx_win;
475
476 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
477 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
478
479 skb_queue_head_init(&chan->tx_q);
480
481 chan->mode = L2CAP_MODE_ERTM;
482
483 err = l2cap_ertm_init(chan);
484 if (err < 0) {
485 l2cap_chan_del(chan, 0);
486 return NULL;
487 }
488
489 chan->conf_state = 0;
490
491 l2cap_chan_add(conn, chan);
492
493 chan->remote_mps = chan->omtu;
494 chan->mps = chan->omtu;
495
496 chan->state = BT_CONNECTED;
497
498 return chan;
499}
500
501/* AMP Manager functions */
502void amp_mgr_get(struct amp_mgr *mgr)
503{
504 BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
505
506 kref_get(&mgr->kref);
507}
508
509static void amp_mgr_destroy(struct kref *kref)
510{
511 struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
512
513 BT_DBG("mgr %p", mgr);
514
515 kfree(mgr);
516}
517
518int amp_mgr_put(struct amp_mgr *mgr)
519{
520 BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
521
522 return kref_put(&mgr->kref, &amp_mgr_destroy);
523}
524
525static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
526{
527 struct amp_mgr *mgr;
528 struct l2cap_chan *chan;
529
530 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
531 if (!mgr)
532 return NULL;
533
534 BT_DBG("conn %p mgr %p", conn, mgr);
535
536 mgr->l2cap_conn = conn;
537
538 chan = a2mp_chan_open(conn);
539 if (!chan) {
540 kfree(mgr);
541 return NULL;
542 }
543
544 mgr->a2mp_chan = chan;
545 chan->data = mgr;
546
547 conn->hcon->amp_mgr = mgr;
548
549 kref_init(&mgr->kref);
550
551 return mgr;
552}
553
554struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
555 struct sk_buff *skb)
556{
557 struct amp_mgr *mgr;
558
559 mgr = amp_mgr_create(conn);
560 if (!mgr) {
561 BT_ERR("Could not create AMP manager");
562 return NULL;
563 }
564
565 BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
566
567 return mgr->a2mp_chan;
568}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc4..f7db5792ec64 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
 /* Bluetooth address family and sockets. */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
 #include <asm/ioctls.h>
-#include <linux/kmod.h>
 
 #include <net/bluetooth/bluetooth.h>
 
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
 	return 0;
 }
 
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+			  poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d656754..4a6620bc1570 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
 */
 
 #include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
 #include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
 #include <linux/file.h>
-
-#include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
 	ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
 };
 
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct net_device *dev = s->dev;
 	struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
 	BNEP_COMPRESSED
 };
 
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d97..98f86f91d47c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
+#include <linux/export.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
 }
 
 #ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	struct ethhdr *eh = (void *) skb->data;
 
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
 /* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
 	return ETH_P_802_2;
 }
 
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	u16 proto = bnep_net_eth_proto(skb);
 	struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810d..5e5f5b410e0b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
 
 #include "bnep.h"
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed9731..5ad7da217474 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
24 24
25/* Bluetooth HCI connection handling. */ 25/* Bluetooth HCI connection handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42 28
43#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/a2mp.h>
45 32
46static void hci_le_connect(struct hci_conn *conn) 33static void hci_le_connect(struct hci_conn *conn)
47{ 34{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
54 conn->sec_level = BT_SECURITY_LOW; 41 conn->sec_level = BT_SECURITY_LOW;
55 42
56 memset(&cp, 0, sizeof(cp)); 43 memset(&cp, 0, sizeof(cp));
57 cp.scan_interval = cpu_to_le16(0x0060); 44 cp.scan_interval = __constant_cpu_to_le16(0x0060);
58 cp.scan_window = cpu_to_le16(0x0030); 45 cp.scan_window = __constant_cpu_to_le16(0x0030);
59 bacpy(&cp.peer_addr, &conn->dst); 46 bacpy(&cp.peer_addr, &conn->dst);
60 cp.peer_addr_type = conn->dst_type; 47 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0028); 48 cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
62 cp.conn_interval_max = cpu_to_le16(0x0038); 49 cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
63 cp.supervision_timeout = cpu_to_le16(0x002a); 50 cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
64 cp.min_ce_len = cpu_to_le16(0x0000); 51 cp.min_ce_len = __constant_cpu_to_le16(0x0000);
65 cp.max_ce_len = cpu_to_le16(0x0000); 52 cp.max_ce_len = __constant_cpu_to_le16(0x0000);
66 53
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 54 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68} 55}
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
99 cp.pscan_rep_mode = ie->data.pscan_rep_mode; 86 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
100 cp.pscan_mode = ie->data.pscan_mode; 87 cp.pscan_mode = ie->data.pscan_mode;
101 cp.clock_offset = ie->data.clock_offset | 88 cp.clock_offset = ie->data.clock_offset |
102 cpu_to_le16(0x8000); 89 __constant_cpu_to_le16(0x8000);
103 } 90 }
104 91
105 memcpy(conn->dev_class, ie->data.dev_class, 3); 92 memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -120,7 +107,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
120{ 107{
121 struct hci_cp_create_conn_cancel cp; 108 struct hci_cp_create_conn_cancel cp;
122 109
123 BT_DBG("%p", conn); 110 BT_DBG("hcon %p", conn);
124 111
125 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2) 112 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
126 return; 113 return;
@@ -133,7 +120,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
133{ 120{
134 struct hci_cp_disconnect cp; 121 struct hci_cp_disconnect cp;
135 122
136 BT_DBG("%p", conn); 123 BT_DBG("hcon %p", conn);
137 124
138 conn->state = BT_DISCONN; 125 conn->state = BT_DISCONN;
139 126
@@ -147,7 +134,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
147 struct hci_dev *hdev = conn->hdev; 134 struct hci_dev *hdev = conn->hdev;
148 struct hci_cp_add_sco cp; 135 struct hci_cp_add_sco cp;
149 136
150 BT_DBG("%p", conn); 137 BT_DBG("hcon %p", conn);
151 138
152 conn->state = BT_CONNECT; 139 conn->state = BT_CONNECT;
153 conn->out = true; 140 conn->out = true;
@@ -165,7 +152,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
165 struct hci_dev *hdev = conn->hdev; 152 struct hci_dev *hdev = conn->hdev;
166 struct hci_cp_setup_sync_conn cp; 153 struct hci_cp_setup_sync_conn cp;
167 154
168 BT_DBG("%p", conn); 155 BT_DBG("hcon %p", conn);
169 156
170 conn->state = BT_CONNECT; 157 conn->state = BT_CONNECT;
171 conn->out = true; 158 conn->out = true;
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
175 cp.handle = cpu_to_le16(handle); 162 cp.handle = cpu_to_le16(handle);
176 cp.pkt_type = cpu_to_le16(conn->pkt_type); 163 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177 164
178 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 165 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
179 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 166 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
180 cp.max_latency = cpu_to_le16(0xffff); 167 cp.max_latency = __constant_cpu_to_le16(0xffff);
181 cp.voice_setting = cpu_to_le16(hdev->voice_setting); 168 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
182 cp.retrans_effort = 0xff; 169 cp.retrans_effort = 0xff;
183 170
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
185} 172}
186 173
187void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 174void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
188 u16 latency, u16 to_multiplier) 175 u16 latency, u16 to_multiplier)
189{ 176{
190 struct hci_cp_le_conn_update cp; 177 struct hci_cp_le_conn_update cp;
191 struct hci_dev *hdev = conn->hdev; 178 struct hci_dev *hdev = conn->hdev;
@@ -197,20 +184,19 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
197 cp.conn_interval_max = cpu_to_le16(max); 184 cp.conn_interval_max = cpu_to_le16(max);
198 cp.conn_latency = cpu_to_le16(latency); 185 cp.conn_latency = cpu_to_le16(latency);
199 cp.supervision_timeout = cpu_to_le16(to_multiplier); 186 cp.supervision_timeout = cpu_to_le16(to_multiplier);
200 cp.min_ce_len = cpu_to_le16(0x0001); 187 cp.min_ce_len = __constant_cpu_to_le16(0x0001);
201 cp.max_ce_len = cpu_to_le16(0x0001); 188 cp.max_ce_len = __constant_cpu_to_le16(0x0001);
202 189
203 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 190 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204} 191}
205EXPORT_SYMBOL(hci_le_conn_update);
206 192
207void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], 193void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
208 __u8 ltk[16]) 194 __u8 ltk[16])
209{ 195{
210 struct hci_dev *hdev = conn->hdev; 196 struct hci_dev *hdev = conn->hdev;
211 struct hci_cp_le_start_enc cp; 197 struct hci_cp_le_start_enc cp;
212 198
213 BT_DBG("%p", conn); 199 BT_DBG("hcon %p", conn);
214 200
215 memset(&cp, 0, sizeof(cp)); 201 memset(&cp, 0, sizeof(cp));
216 202
@@ -221,18 +207,17 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
221 207
222 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 208 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
223} 209}
224EXPORT_SYMBOL(hci_le_start_enc);
225 210
226/* Device _must_ be locked */ 211/* Device _must_ be locked */
227void hci_sco_setup(struct hci_conn *conn, __u8 status) 212void hci_sco_setup(struct hci_conn *conn, __u8 status)
228{ 213{
229 struct hci_conn *sco = conn->link; 214 struct hci_conn *sco = conn->link;
230 215
231 BT_DBG("%p", conn);
232
233 if (!sco) 216 if (!sco)
234 return; 217 return;
235 218
219 BT_DBG("hcon %p", conn);
220
236 if (!status) { 221 if (!status) {
237 if (lmp_esco_capable(conn->hdev)) 222 if (lmp_esco_capable(conn->hdev))
238 hci_setup_sync(sco, conn->handle); 223 hci_setup_sync(sco, conn->handle);
@@ -247,10 +232,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
247static void hci_conn_timeout(struct work_struct *work) 232static void hci_conn_timeout(struct work_struct *work)
248{ 233{
249 struct hci_conn *conn = container_of(work, struct hci_conn, 234 struct hci_conn *conn = container_of(work, struct hci_conn,
250 disc_work.work); 235 disc_work.work);
251 __u8 reason; 236 __u8 reason;
252 237
253 BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); 238 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
254 239
255 if (atomic_read(&conn->refcnt)) 240 if (atomic_read(&conn->refcnt))
256 return; 241 return;
@@ -281,7 +266,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
281{ 266{
282 struct hci_dev *hdev = conn->hdev; 267 struct hci_dev *hdev = conn->hdev;
283 268
284 BT_DBG("conn %p mode %d", conn, conn->mode); 269 BT_DBG("hcon %p mode %d", conn, conn->mode);
285 270
286 if (test_bit(HCI_RAW, &hdev->flags)) 271 if (test_bit(HCI_RAW, &hdev->flags))
287 return; 272 return;
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
295 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { 280 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
296 struct hci_cp_sniff_subrate cp; 281 struct hci_cp_sniff_subrate cp;
297 cp.handle = cpu_to_le16(conn->handle); 282 cp.handle = cpu_to_le16(conn->handle);
298 cp.max_latency = cpu_to_le16(0); 283 cp.max_latency = __constant_cpu_to_le16(0);
299 cp.min_remote_timeout = cpu_to_le16(0); 284 cp.min_remote_timeout = __constant_cpu_to_le16(0);
300 cp.min_local_timeout = cpu_to_le16(0); 285 cp.min_local_timeout = __constant_cpu_to_le16(0);
301 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 286 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
302 } 287 }
303 288
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
306 cp.handle = cpu_to_le16(conn->handle); 291 cp.handle = cpu_to_le16(conn->handle);
307 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 292 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
308 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 293 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
309 cp.attempt = cpu_to_le16(4); 294 cp.attempt = __constant_cpu_to_le16(4);
310 cp.timeout = cpu_to_le16(1); 295 cp.timeout = __constant_cpu_to_le16(1);
311 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); 296 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
312 } 297 }
313} 298}
@@ -316,7 +301,7 @@ static void hci_conn_idle(unsigned long arg)
316{ 301{
317 struct hci_conn *conn = (void *) arg; 302 struct hci_conn *conn = (void *) arg;
318 303
319 BT_DBG("conn %p mode %d", conn, conn->mode); 304 BT_DBG("hcon %p mode %d", conn, conn->mode);
320 305
321 hci_conn_enter_sniff_mode(conn); 306 hci_conn_enter_sniff_mode(conn);
322} 307}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
327 struct hci_dev *hdev = conn->hdev; 312 struct hci_dev *hdev = conn->hdev;
328 313
329 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), 314 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
330 &conn->dst); 315 &conn->dst);
331} 316}
332 317
333struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
376 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 361 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
377 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 362 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
378 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, 363 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
379 (unsigned long) conn); 364 (unsigned long) conn);
380 365
381 atomic_set(&conn->refcnt, 0); 366 atomic_set(&conn->refcnt, 0);
382 367
@@ -397,7 +382,7 @@ int hci_conn_del(struct hci_conn *conn)
397{ 382{
398 struct hci_dev *hdev = conn->hdev; 383 struct hci_dev *hdev = conn->hdev;
399 384
400 BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle); 385 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
401 386
402 del_timer(&conn->idle_timer); 387 del_timer(&conn->idle_timer);
403 388
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
425 } 410 }
426 } 411 }
427 412
428
429 hci_chan_list_flush(conn); 413 hci_chan_list_flush(conn);
430 414
415 if (conn->amp_mgr)
416 amp_mgr_put(conn->amp_mgr);
417
431 hci_conn_hash_del(hdev, conn); 418 hci_conn_hash_del(hdev, conn);
432 if (hdev->notify) 419 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); 420 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -454,7 +441,9 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
454 read_lock(&hci_dev_list_lock); 441 read_lock(&hci_dev_list_lock);
455 442
456 list_for_each_entry(d, &hci_dev_list, list) { 443 list_for_each_entry(d, &hci_dev_list, list) {
457 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) 444 if (!test_bit(HCI_UP, &d->flags) ||
445 test_bit(HCI_RAW, &d->flags) ||
446 d->dev_type != HCI_BREDR)
458 continue; 447 continue;
459 448
460 /* Simple routing: 449 /* Simple routing:
@@ -495,6 +484,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
495 if (type == LE_LINK) { 484 if (type == LE_LINK) {
496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 485 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
497 if (!le) { 486 if (!le) {
487 le = hci_conn_hash_lookup_state(hdev, LE_LINK,
488 BT_CONNECT);
489 if (le)
490 return ERR_PTR(-EBUSY);
491
498 le = hci_conn_add(hdev, LE_LINK, dst); 492 le = hci_conn_add(hdev, LE_LINK, dst);
499 if (!le) 493 if (!le)
500 return ERR_PTR(-ENOMEM); 494 return ERR_PTR(-ENOMEM);
@@ -545,7 +539,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
545 hci_conn_hold(sco); 539 hci_conn_hold(sco);
546 540
547 if (acl->state == BT_CONNECTED && 541 if (acl->state == BT_CONNECTED &&
548 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 542 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
549 set_bit(HCI_CONN_POWER_SAVE, &acl->flags); 543 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
550 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 544 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
551 545
@@ -560,24 +554,22 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
560 554
561 return sco; 555 return sco;
562} 556}
563EXPORT_SYMBOL(hci_connect);
564 557
565/* Check link security requirement */ 558/* Check link security requirement */
566int hci_conn_check_link_mode(struct hci_conn *conn) 559int hci_conn_check_link_mode(struct hci_conn *conn)
567{ 560{
568 BT_DBG("conn %p", conn); 561 BT_DBG("hcon %p", conn);
569 562
570 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT)) 563 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
571 return 0; 564 return 0;
572 565
573 return 1; 566 return 1;
574} 567}
575EXPORT_SYMBOL(hci_conn_check_link_mode);
576 568
577/* Authenticate remote device */ 569/* Authenticate remote device */
578static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 570static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
579{ 571{
580 BT_DBG("conn %p", conn); 572 BT_DBG("hcon %p", conn);
581 573
582 if (conn->pending_sec_level > sec_level) 574 if (conn->pending_sec_level > sec_level)
583 sec_level = conn->pending_sec_level; 575 sec_level = conn->pending_sec_level;
@@ -600,7 +592,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
600 592
601 cp.handle = cpu_to_le16(conn->handle); 593 cp.handle = cpu_to_le16(conn->handle);
602 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 594 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
603 sizeof(cp), &cp); 595 sizeof(cp), &cp);
604 if (conn->key_type != 0xff) 596 if (conn->key_type != 0xff)
605 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 597 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
606 } 598 }
@@ -611,21 +603,21 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
611/* Encrypt the the link */ 603/* Encrypt the the link */
612static void hci_conn_encrypt(struct hci_conn *conn) 604static void hci_conn_encrypt(struct hci_conn *conn)
613{ 605{
614 BT_DBG("conn %p", conn); 606 BT_DBG("hcon %p", conn);
615 607
616 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 608 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
617 struct hci_cp_set_conn_encrypt cp; 609 struct hci_cp_set_conn_encrypt cp;
618 cp.handle = cpu_to_le16(conn->handle); 610 cp.handle = cpu_to_le16(conn->handle);
619 cp.encrypt = 0x01; 611 cp.encrypt = 0x01;
620 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 612 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
621 &cp); 613 &cp);
622 } 614 }
623} 615}
624 616
625/* Enable security */ 617/* Enable security */
626int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 618int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
627{ 619{
628 BT_DBG("conn %p", conn); 620 BT_DBG("hcon %p", conn);
629 621
630 /* For sdp we don't need the link key. */ 622 /* For sdp we don't need the link key. */
631 if (sec_level == BT_SECURITY_SDP) 623 if (sec_level == BT_SECURITY_SDP)
@@ -648,8 +640,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
648 /* An unauthenticated combination key has sufficient security for 640 /* An unauthenticated combination key has sufficient security for
649 security level 1 and 2. */ 641 security level 1 and 2. */
650 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && 642 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
651 (sec_level == BT_SECURITY_MEDIUM || 643 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
652 sec_level == BT_SECURITY_LOW))
653 goto encrypt; 644 goto encrypt;
654 645
655 /* A combination key has always sufficient security for the security 646 /* A combination key has always sufficient security for the security
@@ -657,8 +648,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
657 is generated using maximum PIN code length (16). 648 is generated using maximum PIN code length (16).
658 For pre 2.1 units. */ 649 For pre 2.1 units. */
659 if (conn->key_type == HCI_LK_COMBINATION && 650 if (conn->key_type == HCI_LK_COMBINATION &&
660 (sec_level != BT_SECURITY_HIGH || 651 (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
661 conn->pin_length == 16))
662 goto encrypt; 652 goto encrypt;
663 653
664auth: 654auth:
@@ -680,7 +670,7 @@ EXPORT_SYMBOL(hci_conn_security);
680/* Check secure link requirement */ 670/* Check secure link requirement */
681int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level) 671int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
682{ 672{
683 BT_DBG("conn %p", conn); 673 BT_DBG("hcon %p", conn);
684 674
685 if (sec_level != BT_SECURITY_HIGH) 675 if (sec_level != BT_SECURITY_HIGH)
686 return 1; /* Accept if non-secure is required */ 676 return 1; /* Accept if non-secure is required */
@@ -695,23 +685,22 @@ EXPORT_SYMBOL(hci_conn_check_secure);
695/* Change link key */ 685/* Change link key */
696int hci_conn_change_link_key(struct hci_conn *conn) 686int hci_conn_change_link_key(struct hci_conn *conn)
697{ 687{
698 BT_DBG("conn %p", conn); 688 BT_DBG("hcon %p", conn);
699 689
700 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 690 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
701 struct hci_cp_change_conn_link_key cp; 691 struct hci_cp_change_conn_link_key cp;
702 cp.handle = cpu_to_le16(conn->handle); 692 cp.handle = cpu_to_le16(conn->handle);
703 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 693 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
704 sizeof(cp), &cp); 694 sizeof(cp), &cp);
705 } 695 }
706 696
707 return 0; 697 return 0;
708} 698}
709EXPORT_SYMBOL(hci_conn_change_link_key);
710 699
711/* Switch role */ 700/* Switch role */
712int hci_conn_switch_role(struct hci_conn *conn, __u8 role) 701int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
713{ 702{
714 BT_DBG("conn %p", conn); 703 BT_DBG("hcon %p", conn);
715 704
716 if (!role && conn->link_mode & HCI_LM_MASTER) 705 if (!role && conn->link_mode & HCI_LM_MASTER)
717 return 1; 706 return 1;
@@ -732,7 +721,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
732{ 721{
733 struct hci_dev *hdev = conn->hdev; 722 struct hci_dev *hdev = conn->hdev;
734 723
735 BT_DBG("conn %p mode %d", conn, conn->mode); 724 BT_DBG("hcon %p mode %d", conn, conn->mode);
736 725
737 if (test_bit(HCI_RAW, &hdev->flags)) 726 if (test_bit(HCI_RAW, &hdev->flags))
738 return; 727 return;
@@ -752,7 +741,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
752timer: 741timer:
753 if (hdev->idle_timeout > 0) 742 if (hdev->idle_timeout > 0)
754 mod_timer(&conn->idle_timer, 743 mod_timer(&conn->idle_timer,
755 jiffies + msecs_to_jiffies(hdev->idle_timeout)); 744 jiffies + msecs_to_jiffies(hdev->idle_timeout));
756} 745}
757 746
758/* Drop all connection on the device */ 747/* Drop all connection on the device */
@@ -802,7 +791,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
802 791
803int hci_get_conn_list(void __user *arg) 792int hci_get_conn_list(void __user *arg)
804{ 793{
805 register struct hci_conn *c; 794 struct hci_conn *c;
806 struct hci_conn_list_req req, *cl; 795 struct hci_conn_list_req req, *cl;
807 struct hci_conn_info *ci; 796 struct hci_conn_info *ci;
808 struct hci_dev *hdev; 797 struct hci_dev *hdev;
@@ -906,7 +895,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
906 struct hci_dev *hdev = conn->hdev; 895 struct hci_dev *hdev = conn->hdev;
907 struct hci_chan *chan; 896 struct hci_chan *chan;
908 897
909 BT_DBG("%s conn %p", hdev->name, conn); 898 BT_DBG("%s hcon %p", hdev->name, conn);
910 899
911 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL); 900 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
912 if (!chan) 901 if (!chan)
@@ -925,7 +914,7 @@ int hci_chan_del(struct hci_chan *chan)
925 struct hci_conn *conn = chan->conn; 914 struct hci_conn *conn = chan->conn;
926 struct hci_dev *hdev = conn->hdev; 915 struct hci_dev *hdev = conn->hdev;
927 916
928 BT_DBG("%s conn %p chan %p", hdev->name, conn, chan); 917 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
929 918
930 list_del_rcu(&chan->list); 919 list_del_rcu(&chan->list);
931 920
@@ -941,7 +930,7 @@ void hci_chan_list_flush(struct hci_conn *conn)
941{ 930{
942 struct hci_chan *chan, *n; 931 struct hci_chan *chan, *n;
943 932
944 BT_DBG("conn %p", conn); 933 BT_DBG("hcon %p", conn);
945 934
946 list_for_each_entry_safe(chan, n, &conn->chan_list, list) 935 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
947 hci_chan_del(chan); 936 hci_chan_del(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647b..d4de5db18d5a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,34 +25,14 @@
25 25
26/* Bluetooth HCI core. */ 26/* Bluetooth HCI core. */
27 27
28#include <linux/jiffies.h> 28#include <linux/export.h>
29#include <linux/module.h> 29#include <linux/idr.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/rfkill.h>
44#include <linux/timer.h>
45#include <linux/crypto.h>
46#include <net/sock.h>
47 30
48#include <linux/uaccess.h> 31#include <linux/rfkill.h>
49#include <asm/unaligned.h>
50 32
51#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 34#include <net/bluetooth/hci_core.h>
53 35
54#define AUTO_OFF_TIMEOUT 2000
55
56static void hci_rx_work(struct work_struct *work); 36static void hci_rx_work(struct work_struct *work);
57static void hci_cmd_work(struct work_struct *work); 37static void hci_cmd_work(struct work_struct *work);
58static void hci_tx_work(struct work_struct *work); 38static void hci_tx_work(struct work_struct *work);
@@ -65,6 +45,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
65LIST_HEAD(hci_cb_list); 45LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock); 46DEFINE_RWLOCK(hci_cb_list_lock);
67 47
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
68/* ---- HCI notifications ---- */ 51/* ---- HCI notifications ---- */
69 52
70static void hci_notify(struct hci_dev *hdev, int event) 53static void hci_notify(struct hci_dev *hdev, int event)
@@ -76,7 +59,7 @@ static void hci_notify(struct hci_dev *hdev, int event)
76 59
77void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) 60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78{ 61{
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result); 62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
80 63
81 /* If this is the init phase check if the completed command matches 64 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return. 65 * the last init command, and if not just return.
@@ -124,8 +107,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
124} 107}
125 108
126/* Execute request and wait for completion. */ 109/* Execute request and wait for completion. */
127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 110static int __hci_request(struct hci_dev *hdev,
128 unsigned long opt, __u32 timeout) 111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
129{ 113{
130 DECLARE_WAITQUEUE(wait, current); 114 DECLARE_WAITQUEUE(wait, current);
131 int err = 0; 115 int err = 0;
@@ -166,8 +150,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
166 return err; 150 return err;
167} 151}
168 152
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 153static int hci_request(struct hci_dev *hdev,
170 unsigned long opt, __u32 timeout) 154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
171{ 156{
172 int ret; 157 int ret;
173 158
@@ -201,12 +186,6 @@ static void bredr_init(struct hci_dev *hdev)
201 186
202 /* Mandatory initialization */ 187 /* Mandatory initialization */
203 188
204 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 }
209
210 /* Read Local Supported Features */ 189 /* Read Local Supported Features */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 191
@@ -235,7 +214,7 @@ static void bredr_init(struct hci_dev *hdev)
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 215
237 /* Connection accept timeout ~20 secs */ 216 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00); 217 param = __constant_cpu_to_le16(0x7d00);
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 219
241 bacpy(&cp.bdaddr, BDADDR_ANY); 220 bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -247,9 +226,6 @@ static void amp_init(struct hci_dev *hdev)
247{ 226{
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249 228
250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */ 229 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255 231
@@ -275,6 +251,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
275 } 251 }
276 skb_queue_purge(&hdev->driver_init); 252 skb_queue_purge(&hdev->driver_init);
277 253
254 /* Reset */
255 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
256 hci_reset_req(hdev, 0);
257
278 switch (hdev->dev_type) { 258 switch (hdev->dev_type) {
279 case HCI_BREDR: 259 case HCI_BREDR:
280 bredr_init(hdev); 260 bredr_init(hdev);
@@ -417,7 +397,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
417 INIT_LIST_HEAD(&cache->resolve); 397 INIT_LIST_HEAD(&cache->resolve);
418} 398}
419 399
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 400struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 bdaddr_t *bdaddr)
421{ 402{
422 struct discovery_state *cache = &hdev->discovery; 403 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e; 404 struct inquiry_entry *e;
@@ -478,7 +459,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
478 459
479 list_for_each_entry(p, &cache->resolve, list) { 460 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING && 461 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi)) 462 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break; 463 break;
483 pos = &p->list; 464 pos = &p->list;
484 } 465 }
@@ -503,7 +484,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
503 *ssp = true; 484 *ssp = true;
504 485
505 if (ie->name_state == NAME_NEEDED && 486 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) { 487 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi; 488 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie); 489 hci_inquiry_cache_update_resolve(hdev, ie);
509 } 490 }
@@ -527,7 +508,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
527 508
528update: 509update:
529 if (name_known && ie->name_state != NAME_KNOWN && 510 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) { 511 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN; 512 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list); 513 list_del(&ie->list);
533 } 514 }
@@ -605,8 +586,7 @@ int hci_inquiry(void __user *arg)
605 586
606 hci_dev_lock(hdev); 587 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 588 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) || 589 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev); 590 inquiry_cache_flush(hdev);
611 do_inquiry = 1; 591 do_inquiry = 1;
612 } 592 }
@@ -620,7 +600,9 @@ int hci_inquiry(void __user *arg)
620 goto done; 600 goto done;
621 } 601 }
622 602
623 /* for unlimited number of responses we will use buffer with 255 entries */ 603 /* for unlimited number of responses we will use buffer with
604 * 255 entries
605 */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 606 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 607
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 608 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +623,7 @@ int hci_inquiry(void __user *arg)
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 623 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir); 624 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 625 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp)) 626 ir.num_rsp))
645 err = -EFAULT; 627 err = -EFAULT;
646 } else 628 } else
647 err = -EFAULT; 629 err = -EFAULT;
@@ -701,12 +683,11 @@ int hci_dev_open(__u16 dev)
701 set_bit(HCI_INIT, &hdev->flags); 683 set_bit(HCI_INIT, &hdev->flags);
702 hdev->init_last_cmd = 0; 684 hdev->init_last_cmd = 0;
703 685
704 ret = __hci_request(hdev, hci_init_req, 0, 686 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
705 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 687
707 if (lmp_host_le_capable(hdev)) 688 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0, 689 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 690 HCI_INIT_TIMEOUT);
710 691
711 clear_bit(HCI_INIT, &hdev->flags); 692 clear_bit(HCI_INIT, &hdev->flags);
712 } 693 }
@@ -791,10 +772,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
791 skb_queue_purge(&hdev->cmd_q); 772 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1); 773 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) && 774 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 775 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags); 776 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0, 777 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
797 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags); 778 clear_bit(HCI_INIT, &hdev->flags);
799 } 779 }
800 780
@@ -883,8 +863,7 @@ int hci_dev_reset(__u16 dev)
883 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 863 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
884 864
885 if (!test_bit(HCI_RAW, &hdev->flags)) 865 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0, 866 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
887 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 867
889done: 868done:
890 hci_req_unlock(hdev); 869 hci_req_unlock(hdev);
@@ -924,7 +903,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
924 switch (cmd) { 903 switch (cmd) {
925 case HCISETAUTH: 904 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 905 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 906 HCI_INIT_TIMEOUT);
928 break; 907 break;
929 908
930 case HCISETENCRYPT: 909 case HCISETENCRYPT:
@@ -936,23 +915,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
936 if (!test_bit(HCI_AUTH, &hdev->flags)) { 915 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */ 916 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 917 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 918 HCI_INIT_TIMEOUT);
940 if (err) 919 if (err)
941 break; 920 break;
942 } 921 }
943 922
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 923 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 924 HCI_INIT_TIMEOUT);
946 break; 925 break;
947 926
948 case HCISETSCAN: 927 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 928 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 929 HCI_INIT_TIMEOUT);
951 break; 930 break;
952 931
953 case HCISETLINKPOL: 932 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 933 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 934 HCI_INIT_TIMEOUT);
956 break; 935 break;
957 936
958 case HCISETLINKMODE: 937 case HCISETLINKMODE:
@@ -1102,8 +1081,7 @@ static void hci_power_on(struct work_struct *work)
1102 return; 1081 return;
1103 1082
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1083 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off, 1084 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 1085
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1086 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev); 1087 mgmt_index_added(hdev);
@@ -1112,7 +1090,7 @@ static void hci_power_on(struct work_struct *work)
1112static void hci_power_off(struct work_struct *work) 1090static void hci_power_off(struct work_struct *work)
1113{ 1091{
1114 struct hci_dev *hdev = container_of(work, struct hci_dev, 1092 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work); 1093 power_off.work);
1116 1094
1117 BT_DBG("%s", hdev->name); 1095 BT_DBG("%s", hdev->name);
1118 1096
@@ -1193,7 +1171,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1193} 1171}
1194 1172
1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1173static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type) 1174 u8 key_type, u8 old_key_type)
1197{ 1175{
1198 /* Legacy key */ 1176 /* Legacy key */
1199 if (key_type < 0x03) 1177 if (key_type < 0x03)
@@ -1234,7 +1212,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1234 1212
1235 list_for_each_entry(k, &hdev->long_term_keys, list) { 1213 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv || 1214 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand))) 1215 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue; 1216 continue;
1239 1217
1240 return k; 1218 return k;
@@ -1242,7 +1220,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1242 1220
1243 return NULL; 1221 return NULL;
1244} 1222}
1245EXPORT_SYMBOL(hci_find_ltk);
1246 1223
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1224struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type) 1225 u8 addr_type)
@@ -1251,12 +1228,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 1228
1252 list_for_each_entry(k, &hdev->long_term_keys, list) 1229 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type && 1230 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0) 1231 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k; 1232 return k;
1256 1233
1257 return NULL; 1234 return NULL;
1258} 1235}
1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 1236
1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1237int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1238 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1259,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1283 * combination key for legacy pairing even when there's no 1259 * combination key for legacy pairing even when there's no
1284 * previous key */ 1260 * previous key */
1285 if (type == HCI_LK_CHANGED_COMBINATION && 1261 if (type == HCI_LK_CHANGED_COMBINATION &&
1286 (!conn || conn->remote_auth == 0xff) && 1262 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1287 old_key_type == 0xff) {
1288 type = HCI_LK_COMBINATION; 1263 type = HCI_LK_COMBINATION;
1289 if (conn) 1264 if (conn)
1290 conn->key_type = type; 1265 conn->key_type = type;
1291 } 1266 }
1292 1267
1293 bacpy(&key->bdaddr, bdaddr); 1268 bacpy(&key->bdaddr, bdaddr);
1294 memcpy(key->val, val, 16); 1269 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1295 key->pin_len = pin_len; 1270 key->pin_len = pin_len;
1296 1271
1297 if (type == HCI_LK_CHANGED_COMBINATION) 1272 if (type == HCI_LK_CHANGED_COMBINATION)
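The memcpy() in hci_add_link_key() now uses HCI_LINK_KEY_SIZE instead of a bare 16. The constant simply names the fixed length of a BR/EDR link key:

/* Link keys are always 16 bytes; the named constant replaces the magic
 * number at the copy site above.
 */
#define HCI_LINK_KEY_SIZE	16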
@@ -1383,11 +1358,19 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1383} 1358}
1384 1359
1385/* HCI command timer function */ 1360/* HCI command timer function */
1386static void hci_cmd_timer(unsigned long arg) 1361static void hci_cmd_timeout(unsigned long arg)
1387{ 1362{
1388 struct hci_dev *hdev = (void *) arg; 1363 struct hci_dev *hdev = (void *) arg;
1389 1364
1390 BT_ERR("%s command tx timeout", hdev->name); 1365 if (hdev->sent_cmd) {
1366 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1367 u16 opcode = __le16_to_cpu(sent->opcode);
1368
1369 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1370 } else {
1371 BT_ERR("%s command tx timeout", hdev->name);
1372 }
1373
1391 atomic_set(&hdev->cmd_cnt, 1); 1374 atomic_set(&hdev->cmd_cnt, 1);
1392 queue_work(hdev->workqueue, &hdev->cmd_work); 1375 queue_work(hdev->workqueue, &hdev->cmd_work);
1393} 1376}
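The renamed hci_cmd_timeout() now reports which command stalled by reading the HCI command header from the skb kept in hdev->sent_cmd. A sketch of what that relies on; the header layout mirrors the definition in include/net/bluetooth/hci.h, and the 16-bit opcode is little-endian on the wire:

struct hci_command_hdr {
	__le16	opcode;		/* OCF in the low 10 bits, OGF in the high 6 */
	__u8	plen;
} __packed;

static u16 example_sent_opcode(struct sk_buff *sent_cmd)
{
	struct hci_command_hdr *hdr = (void *) sent_cmd->data;

	return __le16_to_cpu(hdr->opcode);
}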
@@ -1540,6 +1523,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1540 1523
1541 memset(&cp, 0, sizeof(cp)); 1524 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1; 1525 cp.enable = 1;
1526 cp.filter_dup = 1;
1543 1527
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1528 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545} 1529}
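le_scan_enable_req() now sets filter_dup as well, asking the controller to suppress duplicate advertising reports for the duration of the scan instead of forwarding every repeated advertisement to the host. The parameter block it fills in looks like this (mirroring struct hci_cp_le_set_scan_enable from include/net/bluetooth/hci.h, shown here only for illustration):

struct hci_cp_le_set_scan_enable {
	__u8	enable;		/* 0x01 = start scanning */
	__u8	filter_dup;	/* 0x01 = controller drops duplicate reports */
} __packed;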
@@ -1684,7 +1668,7 @@ struct hci_dev *hci_alloc_dev(void)
1684 1668
1685 init_waitqueue_head(&hdev->req_wait_q); 1669 init_waitqueue_head(&hdev->req_wait_q);
1686 1670
1687 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev); 1671 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1688 1672
1689 hci_init_sysfs(hdev); 1673 hci_init_sysfs(hdev);
1690 discovery_init(hdev); 1674 discovery_init(hdev);
@@ -1707,41 +1691,39 @@ EXPORT_SYMBOL(hci_free_dev);
1707/* Register HCI device */ 1691/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev) 1692int hci_register_dev(struct hci_dev *hdev)
1709{ 1693{
1710 struct list_head *head, *p;
1711 int id, error; 1694 int id, error;
1712 1695
1713 if (!hdev->open || !hdev->close) 1696 if (!hdev->open || !hdev->close)
1714 return -EINVAL; 1697 return -EINVAL;
1715 1698
1716 write_lock(&hci_dev_list_lock);
1717
1718 /* Do not allow HCI_AMP devices to register at index 0, 1699 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID. 1700 * so the index can be used as the AMP controller ID.
1720 */ 1701 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1702 switch (hdev->dev_type) {
1722 head = &hci_dev_list; 1703 case HCI_BREDR:
1723 1704 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1724 /* Find first available device id */ 1705 break;
1725 list_for_each(p, &hci_dev_list) { 1706 case HCI_AMP:
1726 int nid = list_entry(p, struct hci_dev, list)->id; 1707 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1727 if (nid > id) 1708 break;
1728 break; 1709 default:
1729 if (nid == id) 1710 return -EINVAL;
1730 id++;
1731 head = p;
1732 } 1711 }
1733 1712
1713 if (id < 0)
1714 return id;
1715
1734 sprintf(hdev->name, "hci%d", id); 1716 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id; 1717 hdev->id = id;
1736 1718
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1719 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 1720
1739 list_add(&hdev->list, head); 1721 write_lock(&hci_dev_list_lock);
1740 1722 list_add(&hdev->list, &hci_dev_list);
1741 write_unlock(&hci_dev_list_lock); 1723 write_unlock(&hci_dev_list_lock);
1742 1724
1743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1725 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 WQ_MEM_RECLAIM, 1); 1726 WQ_MEM_RECLAIM, 1);
1745 if (!hdev->workqueue) { 1727 if (!hdev->workqueue) {
1746 error = -ENOMEM; 1728 error = -ENOMEM;
1747 goto err; 1729 goto err;
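The rewritten hci_register_dev() stops scanning hci_dev_list for a free index and lets an IDA hand one out instead; BR/EDR controllers may take index 0 while AMP controllers start at 1, so the index can double as the AMP controller ID. A minimal sketch of the ida_simple_get()/ida_simple_remove() pairing with hypothetical names; the error path and hci_unregister_dev() below release the index the same way:

#include <linux/idr.h>

static DEFINE_IDA(example_index_ida);

static int example_register(int min_id)
{
	/* smallest free index >= min_id; an upper bound of 0 means no limit */
	int id = ida_simple_get(&example_index_ida, min_id, 0, GFP_KERNEL);

	if (id < 0)
		return id;	/* e.g. -ENOMEM or -ENOSPC */

	/* ... bring the device up under this id ... */
	return id;
}

static void example_unregister(int id)
{
	/* release the index only after the device is fully torn down */
	ida_simple_remove(&example_index_ida, id);
}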
@@ -1752,7 +1734,8 @@ int hci_register_dev(struct hci_dev *hdev)
1752 goto err_wqueue; 1734 goto err_wqueue;
1753 1735
1754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1736 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1737 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1738 hdev);
1756 if (hdev->rfkill) { 1739 if (hdev->rfkill) {
1757 if (rfkill_register(hdev->rfkill) < 0) { 1740 if (rfkill_register(hdev->rfkill) < 0) {
1758 rfkill_destroy(hdev->rfkill); 1741 rfkill_destroy(hdev->rfkill);
@@ -1760,8 +1743,11 @@ int hci_register_dev(struct hci_dev *hdev)
1760 } 1743 }
1761 } 1744 }
1762 1745
1763 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1764 set_bit(HCI_SETUP, &hdev->dev_flags); 1746 set_bit(HCI_SETUP, &hdev->dev_flags);
1747
1748 if (hdev->dev_type != HCI_AMP)
1749 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1750
1765 schedule_work(&hdev->power_on); 1751 schedule_work(&hdev->power_on);
1766 1752
1767 hci_notify(hdev, HCI_DEV_REG); 1753 hci_notify(hdev, HCI_DEV_REG);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
1772err_wqueue: 1758err_wqueue:
1773 destroy_workqueue(hdev->workqueue); 1759 destroy_workqueue(hdev->workqueue);
1774err: 1760err:
1761 ida_simple_remove(&hci_index_ida, hdev->id);
1775 write_lock(&hci_dev_list_lock); 1762 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list); 1763 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock); 1764 write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
1783/* Unregister HCI device */ 1770/* Unregister HCI device */
1784void hci_unregister_dev(struct hci_dev *hdev) 1771void hci_unregister_dev(struct hci_dev *hdev)
1785{ 1772{
1786 int i; 1773 int i, id;
1787 1774
1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1775 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 1776
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 1777 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 1778
1779 id = hdev->id;
1780
1792 write_lock(&hci_dev_list_lock); 1781 write_lock(&hci_dev_list_lock);
1793 list_del(&hdev->list); 1782 list_del(&hdev->list);
1794 write_unlock(&hci_dev_list_lock); 1783 write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1799 kfree_skb(hdev->reassembly[i]); 1788 kfree_skb(hdev->reassembly[i]);
1800 1789
1801 if (!test_bit(HCI_INIT, &hdev->flags) && 1790 if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1791 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 hci_dev_lock(hdev); 1792 hci_dev_lock(hdev);
1804 mgmt_index_removed(hdev); 1793 mgmt_index_removed(hdev);
1805 hci_dev_unlock(hdev); 1794 hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1829 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1830 1819
1831 hci_dev_put(hdev); 1820 hci_dev_put(hdev);
1821
1822 ida_simple_remove(&hci_index_ida, id);
1832} 1823}
1833EXPORT_SYMBOL(hci_unregister_dev); 1824EXPORT_SYMBOL(hci_unregister_dev);
1834 1825
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
1853{ 1844{
1854 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1845 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1846 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 && !test_bit(HCI_INIT, &hdev->flags))) { 1847 && !test_bit(HCI_INIT, &hdev->flags))) {
1857 kfree_skb(skb); 1848 kfree_skb(skb);
1858 return -ENXIO; 1849 return -ENXIO;
1859 } 1850 }
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
1872EXPORT_SYMBOL(hci_recv_frame); 1863EXPORT_SYMBOL(hci_recv_frame);
1873 1864
1874static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1865static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 int count, __u8 index) 1866 int count, __u8 index)
1876{ 1867{
1877 int len = 0; 1868 int len = 0;
1878 int hlen = 0; 1869 int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1881 struct bt_skb_cb *scb; 1872 struct bt_skb_cb *scb;
1882 1873
1883 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1874 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 index >= NUM_REASSEMBLY) 1875 index >= NUM_REASSEMBLY)
1885 return -EILSEQ; 1876 return -EILSEQ;
1886 1877
1887 skb = hdev->reassembly[index]; 1878 skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2023 type = bt_cb(skb)->pkt_type; 2014 type = bt_cb(skb)->pkt_type;
2024 2015
2025 rem = hci_reassembly(hdev, type, data, count, 2016 rem = hci_reassembly(hdev, type, data, count,
2026 STREAM_REASSEMBLY); 2017 STREAM_REASSEMBLY);
2027 if (rem < 0) 2018 if (rem < 0)
2028 return rem; 2019 return rem;
2029 2020
@@ -2096,7 +2087,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2096 struct hci_command_hdr *hdr; 2087 struct hci_command_hdr *hdr;
2097 struct sk_buff *skb; 2088 struct sk_buff *skb;
2098 2089
2099 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); 2090 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2100 2091
2101 skb = bt_skb_alloc(len, GFP_ATOMIC); 2092 skb = bt_skb_alloc(len, GFP_ATOMIC);
2102 if (!skb) { 2093 if (!skb) {
@@ -2138,7 +2129,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2138 if (hdr->opcode != cpu_to_le16(opcode)) 2129 if (hdr->opcode != cpu_to_le16(opcode))
2139 return NULL; 2130 return NULL;
2140 2131
2141 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2132 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2142 2133
2143 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 2134 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2144} 2135}
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2157} 2148}
2158 2149
2159static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2150static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 struct sk_buff *skb, __u16 flags) 2151 struct sk_buff *skb, __u16 flags)
2161{ 2152{
2162 struct hci_dev *hdev = conn->hdev; 2153 struct hci_dev *hdev = conn->hdev;
2163 struct sk_buff *list; 2154 struct sk_buff *list;
@@ -2208,7 +2199,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2208 struct hci_conn *conn = chan->conn; 2199 struct hci_conn *conn = chan->conn;
2209 struct hci_dev *hdev = conn->hdev; 2200 struct hci_dev *hdev = conn->hdev;
2210 2201
2211 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); 2202 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2212 2203
2213 skb->dev = (void *) hdev; 2204 skb->dev = (void *) hdev;
2214 2205
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 2207
2217 queue_work(hdev->workqueue, &hdev->tx_work); 2208 queue_work(hdev->workqueue, &hdev->tx_work);
2218} 2209}
2219EXPORT_SYMBOL(hci_send_acl);
2220 2210
2221/* Send SCO data */ 2211/* Send SCO data */
2222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2212void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2239 skb_queue_tail(&conn->data_q, skb); 2229 skb_queue_tail(&conn->data_q, skb);
2240 queue_work(hdev->workqueue, &hdev->tx_work); 2230 queue_work(hdev->workqueue, &hdev->tx_work);
2241} 2231}
2242EXPORT_SYMBOL(hci_send_sco);
2243 2232
2244/* ---- HCI TX task (outgoing data) ---- */ 2233/* ---- HCI TX task (outgoing data) ---- */
2245 2234
2246/* HCI Connection scheduler */ 2235/* HCI Connection scheduler */
2247static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2236static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2237 int *quote)
2248{ 2238{
2249 struct hci_conn_hash *h = &hdev->conn_hash; 2239 struct hci_conn_hash *h = &hdev->conn_hash;
2250 struct hci_conn *conn = NULL, *c; 2240 struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2303 return conn; 2293 return conn;
2304} 2294}
2305 2295
2306static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2296static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307{ 2297{
2308 struct hci_conn_hash *h = &hdev->conn_hash; 2298 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *c; 2299 struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2316 list_for_each_entry_rcu(c, &h->list, list) { 2306 list_for_each_entry_rcu(c, &h->list, list) {
2317 if (c->type == type && c->sent) { 2307 if (c->type == type && c->sent) {
2318 BT_ERR("%s killing stalled connection %s", 2308 BT_ERR("%s killing stalled connection %s",
2319 hdev->name, batostr(&c->dst)); 2309 hdev->name, batostr(&c->dst));
2320 hci_acl_disconn(c, 0x13); 2310 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2321 } 2311 }
2322 } 2312 }
2323 2313
2324 rcu_read_unlock(); 2314 rcu_read_unlock();
2325} 2315}
2326 2316
2327static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2317static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 int *quote) 2318 int *quote)
2329{ 2319{
2330 struct hci_conn_hash *h = &hdev->conn_hash; 2320 struct hci_conn_hash *h = &hdev->conn_hash;
2331 struct hci_chan *chan = NULL; 2321 struct hci_chan *chan = NULL;
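hci_link_tx_to() now passes HCI_ERROR_REMOTE_USER_TERM rather than a bare 0x13 when it kills a stalled connection. The named constant stands for the spec's "Remote User Terminated Connection" reason code:

/* Named HCI status codes replace magic numbers at the call site above. */
#define HCI_ERROR_REMOTE_USER_TERM	0x13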
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2442 skb->priority = HCI_PRIO_MAX - 1; 2432 skb->priority = HCI_PRIO_MAX - 1;
2443 2433
2444 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2434 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 skb->priority); 2435 skb->priority);
2446 } 2436 }
2447 2437
2448 if (hci_conn_num(hdev, type) == num) 2438 if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460} 2450}
2461 2451
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{ 2453{
2464 if (!test_bit(HCI_RAW, &hdev->flags)) { 2454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 /* ACL tx timeout must be longer than maximum 2455 /* ACL tx timeout must be longer than maximum
2466 * link supervision timeout (40.9 seconds) */ 2456 * link supervision timeout (40.9 seconds) */
2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2458 HCI_ACL_TX_TIMEOUT))
2469 hci_link_tx_to(hdev, ACL_LINK); 2459 hci_link_tx_to(hdev, ACL_LINK);
2470 } 2460 }
2471} 2461}
2472 2462
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2463static void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{ 2464{
2475 unsigned int cnt = hdev->acl_cnt; 2465 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan; 2466 struct hci_chan *chan;
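__check_timeout() keeps the rule spelled out in its comment, namely that the ACL tx timeout has to outlive the worst-case link supervision timeout, but no longer converts HCI_ACL_TX_TIMEOUT at the call site since the constant is already in jiffies. The check itself is the usual wrap-safe jiffies comparison; a tiny sketch with hypothetical names:

#include <linux/jiffies.h>

/* time_after() copes with jiffies wrap-around; the timeout is assumed to
 * already be expressed in jiffies, as HCI_ACL_TX_TIMEOUT now is.
 */
static bool example_link_stalled(unsigned long last_tx, unsigned long timeout)
{
	return time_after(jiffies, last_tx + timeout);
}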
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2480 __check_timeout(hdev, cnt); 2470 __check_timeout(hdev, cnt);
2481 2471
2482 while (hdev->acl_cnt && 2472 while (hdev->acl_cnt &&
2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 u32 priority = (skb_peek(&chan->data_q))->priority; 2474 u32 priority = (skb_peek(&chan->data_q))->priority;
2485 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 skb->len, skb->priority); 2477 skb->len, skb->priority);
2488 2478
2489 /* Stop if priority has changed */ 2479 /* Stop if priority has changed */
2490 if (skb->priority < priority) 2480 if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508 hci_prio_recalculate(hdev, ACL_LINK); 2498 hci_prio_recalculate(hdev, ACL_LINK);
2509} 2499}
2510 2500
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2501static void hci_sched_acl_blk(struct hci_dev *hdev)
2512{ 2502{
2513 unsigned int cnt = hdev->block_cnt; 2503 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan; 2504 struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2518 __check_timeout(hdev, cnt); 2508 __check_timeout(hdev, cnt);
2519 2509
2520 while (hdev->block_cnt > 0 && 2510 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority; 2512 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks; 2514 int blocks;
2525 2515
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority); 2517 skb->len, skb->priority);
2528 2518
2529 /* Stop if priority has changed */ 2519 /* Stop if priority has changed */
2530 if (skb->priority < priority) 2520 if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2537 return; 2527 return;
2538 2528
2539 hci_conn_enter_active_mode(chan->conn, 2529 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active); 2530 bt_cb(skb)->force_active);
2541 2531
2542 hci_send_frame(skb); 2532 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies; 2533 hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2554 hci_prio_recalculate(hdev, ACL_LINK); 2544 hci_prio_recalculate(hdev, ACL_LINK);
2555} 2545}
2556 2546
2557static inline void hci_sched_acl(struct hci_dev *hdev) 2547static void hci_sched_acl(struct hci_dev *hdev)
2558{ 2548{
2559 BT_DBG("%s", hdev->name); 2549 BT_DBG("%s", hdev->name);
2560 2550
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2573} 2563}
2574 2564
2575/* Schedule SCO */ 2565/* Schedule SCO */
2576static inline void hci_sched_sco(struct hci_dev *hdev) 2566static void hci_sched_sco(struct hci_dev *hdev)
2577{ 2567{
2578 struct hci_conn *conn; 2568 struct hci_conn *conn;
2579 struct sk_buff *skb; 2569 struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2596 } 2586 }
2597} 2587}
2598 2588
2599static inline void hci_sched_esco(struct hci_dev *hdev) 2589static void hci_sched_esco(struct hci_dev *hdev)
2600{ 2590{
2601 struct hci_conn *conn; 2591 struct hci_conn *conn;
2602 struct sk_buff *skb; 2592 struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2607 if (!hci_conn_num(hdev, ESCO_LINK)) 2597 if (!hci_conn_num(hdev, ESCO_LINK))
2608 return; 2598 return;
2609 2599
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len); 2603 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb); 2604 hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2619 } 2610 }
2620} 2611}
2621 2612
2622static inline void hci_sched_le(struct hci_dev *hdev) 2613static void hci_sched_le(struct hci_dev *hdev)
2623{ 2614{
2624 struct hci_chan *chan; 2615 struct hci_chan *chan;
2625 struct sk_buff *skb; 2616 struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2634 /* LE tx timeout must be longer than maximum 2625 /* LE tx timeout must be longer than maximum
2635 * link supervision timeout (40.9 seconds) */ 2626 * link supervision timeout (40.9 seconds) */
2636 if (!hdev->le_cnt && hdev->le_pkts && 2627 if (!hdev->le_cnt && hdev->le_pkts &&
2637 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 hci_link_tx_to(hdev, LE_LINK); 2629 hci_link_tx_to(hdev, LE_LINK);
2639 } 2630 }
2640 2631
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2644 u32 priority = (skb_peek(&chan->data_q))->priority; 2635 u32 priority = (skb_peek(&chan->data_q))->priority;
2645 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 skb->len, skb->priority); 2638 skb->len, skb->priority);
2648 2639
2649 /* Stop if priority has changed */ 2640 /* Stop if priority has changed */
2650 if (skb->priority < priority) 2641 if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
2676 struct sk_buff *skb; 2667 struct sk_buff *skb;
2677 2668
2678 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 hdev->sco_cnt, hdev->le_cnt); 2670 hdev->sco_cnt, hdev->le_cnt);
2680 2671
2681 /* Schedule queues and send stuff to HCI driver */ 2672 /* Schedule queues and send stuff to HCI driver */
2682 2673
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
2696/* ----- HCI RX task (incoming data processing) ----- */ 2687/* ----- HCI RX task (incoming data processing) ----- */
2697 2688
2698/* ACL data packet */ 2689/* ACL data packet */
2699static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2690static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700{ 2691{
2701 struct hci_acl_hdr *hdr = (void *) skb->data; 2692 struct hci_acl_hdr *hdr = (void *) skb->data;
2702 struct hci_conn *conn; 2693 struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2708 flags = hci_flags(handle); 2699 flags = hci_flags(handle);
2709 handle = hci_handle(handle); 2700 handle = hci_handle(handle);
2710 2701
2711 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2702 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2703 handle, flags);
2712 2704
2713 hdev->stat.acl_rx++; 2705 hdev->stat.acl_rx++;
2714 2706
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 return; 2724 return;
2733 } else { 2725 } else {
2734 BT_ERR("%s ACL packet for unknown connection handle %d", 2726 BT_ERR("%s ACL packet for unknown connection handle %d",
2735 hdev->name, handle); 2727 hdev->name, handle);
2736 } 2728 }
2737 2729
2738 kfree_skb(skb); 2730 kfree_skb(skb);
2739} 2731}
2740 2732
2741/* SCO data packet */ 2733/* SCO data packet */
2742static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{ 2735{
2744 struct hci_sco_hdr *hdr = (void *) skb->data; 2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2745 struct hci_conn *conn; 2737 struct hci_conn *conn;
@@ -2749,7 +2741,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2749 2741
2750 handle = __le16_to_cpu(hdr->handle); 2742 handle = __le16_to_cpu(hdr->handle);
2751 2743
2752 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); 2744 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2753 2745
2754 hdev->stat.sco_rx++; 2746 hdev->stat.sco_rx++;
2755 2747
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2763 return; 2755 return;
2764 } else { 2756 } else {
2765 BT_ERR("%s SCO packet for unknown connection handle %d", 2757 BT_ERR("%s SCO packet for unknown connection handle %d",
2766 hdev->name, handle); 2758 hdev->name, handle);
2767 } 2759 }
2768 2760
2769 kfree_skb(skb); 2761 kfree_skb(skb);
@@ -2829,7 +2821,8 @@ static void hci_cmd_work(struct work_struct *work)
2829 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 2821 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2830 struct sk_buff *skb; 2822 struct sk_buff *skb;
2831 2823
2832 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2824 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2825 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2833 2826
2834 /* Send queued commands */ 2827 /* Send queued commands */
2835 if (atomic_read(&hdev->cmd_cnt)) { 2828 if (atomic_read(&hdev->cmd_cnt)) {
@@ -2847,7 +2840,7 @@ static void hci_cmd_work(struct work_struct *work)
2847 del_timer(&hdev->cmd_timer); 2840 del_timer(&hdev->cmd_timer);
2848 else 2841 else
2849 mod_timer(&hdev->cmd_timer, 2842 mod_timer(&hdev->cmd_timer,
2850 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); 2843 jiffies + HCI_CMD_TIMEOUT);
2851 } else { 2844 } else {
2852 skb_queue_head(&hdev->cmd_q, skb); 2845 skb_queue_head(&hdev->cmd_q, skb);
2853 queue_work(hdev->workqueue, &hdev->cmd_work); 2846 queue_work(hdev->workqueue, &hdev->cmd_work);
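hci_cmd_work() sends at most one command per controller credit (cmd_cnt) and keeps a watchdog running while a command is outstanding; with HCI_CMD_TIMEOUT in jiffies the re-arm is a plain mod_timer(). A sketch of that timer pattern in the setup_timer() style used by this series; names are hypothetical:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_cmd_timer;

static void example_cmd_timeout(unsigned long data)
{
	/* the outstanding command never completed; recover here */
}

static void example_init(void)
{
	setup_timer(&example_cmd_timer, example_cmd_timeout, 0);
}

static void example_command_sent(unsigned long timeout)
{
	/* one shot per command: push the deadline out on every send */
	mod_timer(&example_cmd_timer, jiffies + timeout);
}

static void example_queue_drained(void)
{
	del_timer(&example_cmd_timer);
}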
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94ad124a4ea3..41ff978a33f9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h> 28#include <asm/unaligned.h>
42 29
43#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -49,7 +36,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49{ 36{
50 __u8 status = *((__u8 *) skb->data); 37 __u8 status = *((__u8 *) skb->data);
51 38
52 BT_DBG("%s status 0x%x", hdev->name, status); 39 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 40
54 if (status) { 41 if (status) {
55 hci_dev_lock(hdev); 42 hci_dev_lock(hdev);
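This hunk and most of the hci_event.c hunks that follow only touch the BT_DBG() format strings: one-byte status codes become 0x%2.2x and two-byte opcodes and handles become 0x%4.4x, so the debug output always shows a fixed number of hex digits. A quick illustration of the difference:

u8  status = 0x0c;
u16 opcode = 0x0c03;

pr_info("status 0x%x\n",    status);	/* "status 0xc"    - width varies       */
pr_info("status 0x%2.2x\n", status);	/* "status 0x0c"   - always two digits  */
pr_info("opcode 0x%4.4x\n", opcode);	/* "opcode 0x0c03" - always four digits */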
@@ -73,7 +60,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{ 60{
74 __u8 status = *((__u8 *) skb->data); 61 __u8 status = *((__u8 *) skb->data);
75 62
76 BT_DBG("%s status 0x%x", hdev->name, status); 63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
77 64
78 if (status) 65 if (status)
79 return; 66 return;
@@ -85,7 +72,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
85{ 72{
86 __u8 status = *((__u8 *) skb->data); 73 __u8 status = *((__u8 *) skb->data);
87 74
88 BT_DBG("%s status 0x%x", hdev->name, status); 75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
89 76
90 if (status) 77 if (status)
91 return; 78 return;
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
95 hci_conn_check_pending(hdev); 82 hci_conn_check_pending(hdev);
96} 83}
97 84
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
99{ 87{
100 BT_DBG("%s", hdev->name); 88 BT_DBG("%s", hdev->name);
101} 89}
@@ -105,7 +93,7 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
105 struct hci_rp_role_discovery *rp = (void *) skb->data; 93 struct hci_rp_role_discovery *rp = (void *) skb->data;
106 struct hci_conn *conn; 94 struct hci_conn *conn;
107 95
108 BT_DBG("%s status 0x%x", hdev->name, rp->status); 96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
109 97
110 if (rp->status) 98 if (rp->status)
111 return; 99 return;
@@ -128,7 +116,7 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128 struct hci_rp_read_link_policy *rp = (void *) skb->data; 116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
129 struct hci_conn *conn; 117 struct hci_conn *conn;
130 118
131 BT_DBG("%s status 0x%x", hdev->name, rp->status); 119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 120
133 if (rp->status) 121 if (rp->status)
134 return; 122 return;
@@ -148,7 +136,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
148 struct hci_conn *conn; 136 struct hci_conn *conn;
149 void *sent; 137 void *sent;
150 138
151 BT_DBG("%s status 0x%x", hdev->name, rp->status); 139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
152 140
153 if (rp->status) 141 if (rp->status)
154 return; 142 return;
@@ -166,11 +154,12 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
166 hci_dev_unlock(hdev); 154 hci_dev_unlock(hdev);
167} 155}
168 156
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
170{ 159{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172 161
173 BT_DBG("%s status 0x%x", hdev->name, rp->status); 162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
174 163
175 if (rp->status) 164 if (rp->status)
176 return; 165 return;
@@ -178,12 +167,13 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
178 hdev->link_policy = __le16_to_cpu(rp->policy); 167 hdev->link_policy = __le16_to_cpu(rp->policy);
179} 168}
180 169
181static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
182{ 172{
183 __u8 status = *((__u8 *) skb->data); 173 __u8 status = *((__u8 *) skb->data);
184 void *sent; 174 void *sent;
185 175
186 BT_DBG("%s status 0x%x", hdev->name, status); 176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 177
188 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
189 if (!sent) 179 if (!sent)
@@ -199,7 +189,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
199{ 189{
200 __u8 status = *((__u8 *) skb->data); 190 __u8 status = *((__u8 *) skb->data);
201 191
202 BT_DBG("%s status 0x%x", hdev->name, status); 192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
203 193
204 clear_bit(HCI_RESET, &hdev->flags); 194 clear_bit(HCI_RESET, &hdev->flags);
205 195
@@ -217,7 +207,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
217 __u8 status = *((__u8 *) skb->data); 207 __u8 status = *((__u8 *) skb->data);
218 void *sent; 208 void *sent;
219 209
220 BT_DBG("%s status 0x%x", hdev->name, status); 210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221 211
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (!sent) 213 if (!sent)
@@ -239,7 +229,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239{ 229{
240 struct hci_rp_read_local_name *rp = (void *) skb->data; 230 struct hci_rp_read_local_name *rp = (void *) skb->data;
241 231
242 BT_DBG("%s status 0x%x", hdev->name, rp->status); 232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243 233
244 if (rp->status) 234 if (rp->status)
245 return; 235 return;
@@ -253,7 +243,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
253 __u8 status = *((__u8 *) skb->data); 243 __u8 status = *((__u8 *) skb->data);
254 void *sent; 244 void *sent;
255 245
256 BT_DBG("%s status 0x%x", hdev->name, status); 246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
257 247
258 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
259 if (!sent) 249 if (!sent)
@@ -279,7 +269,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
279 __u8 status = *((__u8 *) skb->data); 269 __u8 status = *((__u8 *) skb->data);
280 void *sent; 270 void *sent;
281 271
282 BT_DBG("%s status 0x%x", hdev->name, status); 272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 273
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent) 275 if (!sent)
@@ -303,7 +293,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
303 int old_pscan, old_iscan; 293 int old_pscan, old_iscan;
304 void *sent; 294 void *sent;
305 295
306 BT_DBG("%s status 0x%x", hdev->name, status); 296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
307 297
308 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 298 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
309 if (!sent) 299 if (!sent)
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
329 if (hdev->discov_timeout > 0) { 319 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to); 322 to);
333 } 323 }
334 } else if (old_iscan) 324 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0); 325 mgmt_discoverable(hdev, 0);
@@ -350,7 +340,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350{ 340{
351 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
352 342
353 BT_DBG("%s status 0x%x", hdev->name, rp->status); 343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
354 344
355 if (rp->status) 345 if (rp->status)
356 return; 346 return;
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 memcpy(hdev->dev_class, rp->dev_class, 3); 348 memcpy(hdev->dev_class, rp->dev_class, 3);
359 349
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362} 352}
363 353
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 354static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -366,7 +356,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
366 __u8 status = *((__u8 *) skb->data); 356 __u8 status = *((__u8 *) skb->data);
367 void *sent; 357 void *sent;
368 358
369 BT_DBG("%s status 0x%x", hdev->name, status); 359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 360
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 361 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
372 if (!sent) 362 if (!sent)
@@ -388,7 +378,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
388 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 378 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
389 __u16 setting; 379 __u16 setting;
390 380
391 BT_DBG("%s status 0x%x", hdev->name, rp->status); 381 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
392 382
393 if (rp->status) 383 if (rp->status)
394 return; 384 return;
@@ -400,19 +390,20 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
400 390
401 hdev->voice_setting = setting; 391 hdev->voice_setting = setting;
402 392
403 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 393 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
404 394
405 if (hdev->notify) 395 if (hdev->notify)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407} 397}
408 398
409static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
410{ 401{
411 __u8 status = *((__u8 *) skb->data); 402 __u8 status = *((__u8 *) skb->data);
412 __u16 setting; 403 __u16 setting;
413 void *sent; 404 void *sent;
414 405
415 BT_DBG("%s status 0x%x", hdev->name, status); 406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
416 407
417 if (status) 408 if (status)
418 return; 409 return;
@@ -428,7 +419,7 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
428 419
429 hdev->voice_setting = setting; 420 hdev->voice_setting = setting;
430 421
431 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 422 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
432 423
433 if (hdev->notify) 424 if (hdev->notify)
434 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 425 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
@@ -438,7 +429,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
438{ 429{
439 __u8 status = *((__u8 *) skb->data); 430 __u8 status = *((__u8 *) skb->data);
440 431
441 BT_DBG("%s status 0x%x", hdev->name, status); 432 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442 433
443 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); 434 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
444} 435}
@@ -448,7 +439,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448 __u8 status = *((__u8 *) skb->data); 439 __u8 status = *((__u8 *) skb->data);
449 void *sent; 440 void *sent;
450 441
451 BT_DBG("%s status 0x%x", hdev->name, status); 442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
452 443
453 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
454 if (!sent) 445 if (!sent)
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
473 return 1; 464 return 1;
474 465
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757) 467 hdev->lmp_subver == 0x0757)
477 return 1; 468 return 1;
478 469
479 if (hdev->manufacturer == 15) { 470 if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
486 } 477 }
487 478
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805) 480 hdev->lmp_subver == 0x1805)
490 return 1; 481 return 1;
491 482
492 return 0; 483 return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568 559
569 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 560 if (lmp_ssp_capable(hdev)) {
570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
571 u8 mode = 0x01; 562 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
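hci_setup() now asks lmp_ssp_capable(hdev) instead of testing hdev->features[6] & LMP_SIMPLE_PAIR by hand. Helpers of this kind are thin wrappers over the feature bitmask, roughly along these lines, although the authoritative definition is the one in include/net/bluetooth/hci_core.h:

/* Assumed shape of the helper, shown only for illustration. */
#define lmp_ssp_capable(dev)	((dev)->features[6] & LMP_SIMPLE_PAIR)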
@@ -606,7 +597,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606{ 597{
607 struct hci_rp_read_local_version *rp = (void *) skb->data; 598 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 599
609 BT_DBG("%s status 0x%x", hdev->name, rp->status); 600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610 601
611 if (rp->status) 602 if (rp->status)
612 goto done; 603 goto done;
@@ -617,9 +608,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
617 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 608 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619 610
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 611 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
621 hdev->manufacturer, 612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
622 hdev->hci_ver, hdev->hci_rev);
623 613
624 if (test_bit(HCI_INIT, &hdev->flags)) 614 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev); 615 hci_setup(hdev);
@@ -646,11 +636,12 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647} 637}
648 638
649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 639static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
650{ 641{
651 struct hci_rp_read_local_commands *rp = (void *) skb->data; 642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
652 643
653 BT_DBG("%s status 0x%x", hdev->name, rp->status); 644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
654 645
655 if (rp->status) 646 if (rp->status)
656 goto done; 647 goto done;
@@ -664,11 +655,12 @@ done:
664 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
665} 656}
666 657
667static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 658static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
668{ 660{
669 struct hci_rp_read_local_features *rp = (void *) skb->data; 661 struct hci_rp_read_local_features *rp = (void *) skb->data;
670 662
671 BT_DBG("%s status 0x%x", hdev->name, rp->status); 663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
672 664
673 if (rp->status) 665 if (rp->status)
674 return; 666 return;
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714 706
715 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
716 hdev->features[0], hdev->features[1], 708 hdev->features[0], hdev->features[1],
717 hdev->features[2], hdev->features[3], 709 hdev->features[2], hdev->features[3],
718 hdev->features[4], hdev->features[5], 710 hdev->features[4], hdev->features[5],
719 hdev->features[6], hdev->features[7]); 711 hdev->features[6], hdev->features[7]);
720} 712}
721 713
722static void hci_set_le_support(struct hci_dev *hdev) 714static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,11 +728,11 @@ static void hci_set_le_support(struct hci_dev *hdev)
736} 728}
737 729
738static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 730static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
739 struct sk_buff *skb) 731 struct sk_buff *skb)
740{ 732{
741 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
742 734
743 BT_DBG("%s status 0x%x", hdev->name, rp->status); 735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
744 736
745 if (rp->status) 737 if (rp->status)
746 goto done; 738 goto done;
@@ -762,11 +754,11 @@ done:
762} 754}
763 755
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 756static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb) 757 struct sk_buff *skb)
766{ 758{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768 760
769 BT_DBG("%s status 0x%x", hdev->name, rp->status); 761 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
770 762
771 if (rp->status) 763 if (rp->status)
772 return; 764 return;
@@ -780,7 +772,7 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
780{ 772{
781 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 773 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
782 774
783 BT_DBG("%s status 0x%x", hdev->name, rp->status); 775 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
784 776
785 if (rp->status) 777 if (rp->status)
786 return; 778 return;
@@ -798,16 +790,15 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
798 hdev->acl_cnt = hdev->acl_pkts; 790 hdev->acl_cnt = hdev->acl_pkts;
799 hdev->sco_cnt = hdev->sco_pkts; 791 hdev->sco_cnt = hdev->sco_pkts;
800 792
801 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
802 hdev->acl_mtu, hdev->acl_pkts, 794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
803 hdev->sco_mtu, hdev->sco_pkts);
804} 795}
805 796
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 797static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
807{ 798{
808 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
809 800
810 BT_DBG("%s status 0x%x", hdev->name, rp->status); 801 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811 802
812 if (!rp->status) 803 if (!rp->status)
813 bacpy(&hdev->bdaddr, &rp->bdaddr); 804 bacpy(&hdev->bdaddr, &rp->bdaddr);
@@ -816,11 +807,11 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
816} 807}
817 808
818static void hci_cc_read_data_block_size(struct hci_dev *hdev, 809static void hci_cc_read_data_block_size(struct hci_dev *hdev,
819 struct sk_buff *skb) 810 struct sk_buff *skb)
820{ 811{
821 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
822 813
823 BT_DBG("%s status 0x%x", hdev->name, rp->status); 814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
824 815
825 if (rp->status) 816 if (rp->status)
826 return; 817 return;
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 hdev->block_cnt = hdev->num_blocks; 823 hdev->block_cnt = hdev->num_blocks;
833 824
834 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
835 hdev->block_cnt, hdev->block_len); 826 hdev->block_cnt, hdev->block_len);
836 827
837 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); 828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
838} 829}
@@ -841,17 +832,17 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
841{ 832{
842 __u8 status = *((__u8 *) skb->data); 833 __u8 status = *((__u8 *) skb->data);
843 834
844 BT_DBG("%s status 0x%x", hdev->name, status); 835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
845 836
846 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
847} 838}
848 839
849static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 840static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
850 struct sk_buff *skb) 841 struct sk_buff *skb)
851{ 842{
852 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
853 844
854 BT_DBG("%s status 0x%x", hdev->name, rp->status); 845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 846
856 if (rp->status) 847 if (rp->status)
857 return; 848 return;
@@ -871,11 +862,11 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
871} 862}
872 863
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 864static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb) 865 struct sk_buff *skb)
875{ 866{
876 __u8 status = *((__u8 *) skb->data); 867 __u8 status = *((__u8 *) skb->data);
877 868
878 BT_DBG("%s status 0x%x", hdev->name, status); 869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
879 870
880 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status); 871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
881} 872}
@@ -884,27 +875,27 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
884{ 875{
885 __u8 status = *((__u8 *) skb->data); 876 __u8 status = *((__u8 *) skb->data);
886 877
887 BT_DBG("%s status 0x%x", hdev->name, status); 878 BT_DBG("%s status 0x%2.2x", hdev->name, status);
888 879
889 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status); 880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
890} 881}
891 882
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 883static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb) 884 struct sk_buff *skb)
894{ 885{
895 __u8 status = *((__u8 *) skb->data); 886 __u8 status = *((__u8 *) skb->data);
896 887
897 BT_DBG("%s status 0x%x", hdev->name, status); 888 BT_DBG("%s status 0x%2.2x", hdev->name, status);
898 889
899 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status); 890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
900} 891}
901 892
902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 893static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
903 struct sk_buff *skb) 894 struct sk_buff *skb)
904{ 895{
905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906 897
907 BT_DBG("%s status 0x%x", hdev->name, rp->status); 898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908 899
909 if (!rp->status) 900 if (!rp->status)
910 hdev->inq_tx_power = rp->tx_power; 901 hdev->inq_tx_power = rp->tx_power;
@@ -916,7 +907,7 @@ static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
916{ 907{
917 __u8 status = *((__u8 *) skb->data); 908 __u8 status = *((__u8 *) skb->data);
918 909
919 BT_DBG("%s status 0x%x", hdev->name, status); 910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
920 911
921 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status); 912 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
922} 913}
@@ -927,7 +918,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
927 struct hci_cp_pin_code_reply *cp; 918 struct hci_cp_pin_code_reply *cp;
928 struct hci_conn *conn; 919 struct hci_conn *conn;
929 920
930 BT_DBG("%s status 0x%x", hdev->name, rp->status); 921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
931 922
932 hci_dev_lock(hdev); 923 hci_dev_lock(hdev);
933 924
@@ -953,13 +944,13 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
953{ 944{
954 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 945 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
955 946
956 BT_DBG("%s status 0x%x", hdev->name, rp->status); 947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 948
958 hci_dev_lock(hdev); 949 hci_dev_lock(hdev);
959 950
960 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
962 rp->status); 953 rp->status);
963 954
964 hci_dev_unlock(hdev); 955 hci_dev_unlock(hdev);
965} 956}
@@ -969,7 +960,7 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
969{ 960{
970 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data; 961 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
971 962
972 BT_DBG("%s status 0x%x", hdev->name, rp->status); 963 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973 964
974 if (rp->status) 965 if (rp->status)
975 return; 966 return;
@@ -988,7 +979,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
988{ 979{
989 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
990 981
991 BT_DBG("%s status 0x%x", hdev->name, rp->status); 982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
992 983
993 hci_dev_lock(hdev); 984 hci_dev_lock(hdev);
994 985
@@ -1000,11 +991,11 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1000} 991}
1001 992
1002static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 993static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1003 struct sk_buff *skb) 994 struct sk_buff *skb)
1004{ 995{
1005 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1006 997
1007 BT_DBG("%s status 0x%x", hdev->name, rp->status); 998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1008 999
1009 hci_dev_lock(hdev); 1000 hci_dev_lock(hdev);
1010 1001
@@ -1019,7 +1010,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1019{ 1010{
1020 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021 1012
1022 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023 1014
1024 hci_dev_lock(hdev); 1015 hci_dev_lock(hdev);
1025 1016
@@ -1031,11 +1022,11 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1031} 1022}
1032 1023
1033static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 1024static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1034 struct sk_buff *skb) 1025 struct sk_buff *skb)
1035{ 1026{
1036 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1037 1028
1038 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1039 1030
1040 hci_dev_lock(hdev); 1031 hci_dev_lock(hdev);
1041 1032
@@ -1047,11 +1038,11 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1047} 1038}
1048 1039
1049static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, 1040static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1050 struct sk_buff *skb) 1041 struct sk_buff *skb)
1051{ 1042{
1052 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1053 1044
1054 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1045 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1055 1046
1056 hci_dev_lock(hdev); 1047 hci_dev_lock(hdev);
1057 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, 1048 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
@@ -1063,7 +1054,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1063{ 1054{
1064 __u8 status = *((__u8 *) skb->data); 1055 __u8 status = *((__u8 *) skb->data);
1065 1056
1066 BT_DBG("%s status 0x%x", hdev->name, status); 1057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1067 1058
1068 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status); 1059 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1069 1060
@@ -1076,12 +1067,12 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1076} 1067}
1077 1068
1078static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1069static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1079 struct sk_buff *skb) 1070 struct sk_buff *skb)
1080{ 1071{
1081 struct hci_cp_le_set_scan_enable *cp; 1072 struct hci_cp_le_set_scan_enable *cp;
1082 __u8 status = *((__u8 *) skb->data); 1073 __u8 status = *((__u8 *) skb->data);
1083 1074
1084 BT_DBG("%s status 0x%x", hdev->name, status); 1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085 1076
1086 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1087 if (!cp) 1078 if (!cp)
@@ -1136,7 +1127,7 @@ static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1136{ 1127{
1137 struct hci_rp_le_ltk_reply *rp = (void *) skb->data; 1128 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1138 1129
1139 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1140 1131
1141 if (rp->status) 1132 if (rp->status)
1142 return; 1133 return;
@@ -1148,7 +1139,7 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1148{ 1139{
1149 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data; 1140 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1150 1141
1151 BT_DBG("%s status 0x%x", hdev->name, rp->status); 1142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1152 1143
1153 if (rp->status) 1144 if (rp->status)
1154 return; 1145 return;
@@ -1156,13 +1147,13 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1156 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1157} 1148}
1158 1149
1159static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1150static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1160 struct sk_buff *skb) 1151 struct sk_buff *skb)
1161{ 1152{
1162 struct hci_cp_write_le_host_supported *sent; 1153 struct hci_cp_write_le_host_supported *sent;
1163 __u8 status = *((__u8 *) skb->data); 1154 __u8 status = *((__u8 *) skb->data);
1164 1155
1165 BT_DBG("%s status 0x%x", hdev->name, status); 1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166 1157
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1168 if (!sent) 1159 if (!sent)
@@ -1176,15 +1167,15 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1176 } 1167 }
1177 1168
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1179 !test_bit(HCI_INIT, &hdev->flags)) 1170 !test_bit(HCI_INIT, &hdev->flags))
1180 mgmt_le_enable_complete(hdev, sent->le, status); 1171 mgmt_le_enable_complete(hdev, sent->le, status);
1181 1172
1182 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); 1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1183} 1174}
1184 1175
1185static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1176static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1186{ 1177{
1187 BT_DBG("%s status 0x%x", hdev->name, status); 1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1188 1179
1189 if (status) { 1180 if (status) {
1190 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1181 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1203,12 +1194,12 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1203 hci_dev_unlock(hdev); 1194 hci_dev_unlock(hdev);
1204} 1195}
1205 1196
1206static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1197static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1207{ 1198{
1208 struct hci_cp_create_conn *cp; 1199 struct hci_cp_create_conn *cp;
1209 struct hci_conn *conn; 1200 struct hci_conn *conn;
1210 1201
1211 BT_DBG("%s status 0x%x", hdev->name, status); 1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1212 1203
1213 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 1204 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1214 if (!cp) 1205 if (!cp)
@@ -1218,7 +1209,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1218 1209
1219 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1220 1211
1221 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); 1212 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1222 1213
1223 if (status) { 1214 if (status) {
1224 if (conn && conn->state == BT_CONNECT) { 1215 if (conn && conn->state == BT_CONNECT) {
@@ -1249,7 +1240,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1249 struct hci_conn *acl, *sco; 1240 struct hci_conn *acl, *sco;
1250 __u16 handle; 1241 __u16 handle;
1251 1242
1252 BT_DBG("%s status 0x%x", hdev->name, status); 1243 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 1244
1254 if (!status) 1245 if (!status)
1255 return; 1246 return;
@@ -1260,7 +1251,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1260 1251
1261 handle = __le16_to_cpu(cp->handle); 1252 handle = __le16_to_cpu(cp->handle);
1262 1253
1263 BT_DBG("%s handle %d", hdev->name, handle); 1254 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1264 1255
1265 hci_dev_lock(hdev); 1256 hci_dev_lock(hdev);
1266 1257
@@ -1283,7 +1274,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1283 struct hci_cp_auth_requested *cp; 1274 struct hci_cp_auth_requested *cp;
1284 struct hci_conn *conn; 1275 struct hci_conn *conn;
1285 1276
1286 BT_DBG("%s status 0x%x", hdev->name, status); 1277 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1287 1278
1288 if (!status) 1279 if (!status)
1289 return; 1280 return;
@@ -1310,7 +1301,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1310 struct hci_cp_set_conn_encrypt *cp; 1301 struct hci_cp_set_conn_encrypt *cp;
1311 struct hci_conn *conn; 1302 struct hci_conn *conn;
1312 1303
1313 BT_DBG("%s status 0x%x", hdev->name, status); 1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1314 1305
1315 if (!status) 1306 if (!status)
1316 return; 1307 return;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1333} 1324}
1334 1325
1335static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1326static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1336 struct hci_conn *conn) 1327 struct hci_conn *conn)
1337{ 1328{
1338 if (conn->state != BT_CONFIG || !conn->out) 1329 if (conn->state != BT_CONFIG || !conn->out)
1339 return 0; 1330 return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1343 1334
1344 /* Only request authentication for SSP connections or non-SSP 1335 /* Only request authentication for SSP connections or non-SSP
1345 * devices with sec_level HIGH or if MITM protection is requested */ 1336 * devices with sec_level HIGH or if MITM protection is requested */
1346 if (!hci_conn_ssp_enabled(conn) && 1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1347 conn->pending_sec_level != BT_SECURITY_HIGH && 1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1348 !(conn->auth_type & 0x01))
1349 return 0; 1339 return 0;
1350 1340
1351 return 1; 1341 return 1;
1352} 1342}
1353 1343
1354static inline int hci_resolve_name(struct hci_dev *hdev, 1344static int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e) 1345 struct inquiry_entry *e)
1356{ 1346{
1357 struct hci_cp_remote_name_req cp; 1347 struct hci_cp_remote_name_req cp;
@@ -1423,7 +1413,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1423 struct hci_cp_remote_name_req *cp; 1413 struct hci_cp_remote_name_req *cp;
1424 struct hci_conn *conn; 1414 struct hci_conn *conn;
1425 1415
1426 BT_DBG("%s status 0x%x", hdev->name, status); 1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1427 1417
1428 /* If successful wait for the name req complete event before 1418 /* If successful wait for the name req complete event before
1429 * checking for the need to do authentication */ 1419 * checking for the need to do authentication */
@@ -1462,7 +1452,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1462 struct hci_cp_read_remote_features *cp; 1452 struct hci_cp_read_remote_features *cp;
1463 struct hci_conn *conn; 1453 struct hci_conn *conn;
1464 1454
1465 BT_DBG("%s status 0x%x", hdev->name, status); 1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1466 1456
1467 if (!status) 1457 if (!status)
1468 return; 1458 return;
@@ -1489,7 +1479,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1489 struct hci_cp_read_remote_ext_features *cp; 1479 struct hci_cp_read_remote_ext_features *cp;
1490 struct hci_conn *conn; 1480 struct hci_conn *conn;
1491 1481
1492 BT_DBG("%s status 0x%x", hdev->name, status); 1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1493 1483
1494 if (!status) 1484 if (!status)
1495 return; 1485 return;
@@ -1517,7 +1507,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1517 struct hci_conn *acl, *sco; 1507 struct hci_conn *acl, *sco;
1518 __u16 handle; 1508 __u16 handle;
1519 1509
1520 BT_DBG("%s status 0x%x", hdev->name, status); 1510 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1521 1511
1522 if (!status) 1512 if (!status)
1523 return; 1513 return;
@@ -1528,7 +1518,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1528 1518
1529 handle = __le16_to_cpu(cp->handle); 1519 handle = __le16_to_cpu(cp->handle);
1530 1520
1531 BT_DBG("%s handle %d", hdev->name, handle); 1521 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1532 1522
1533 hci_dev_lock(hdev); 1523 hci_dev_lock(hdev);
1534 1524
@@ -1551,7 +1541,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1551 struct hci_cp_sniff_mode *cp; 1541 struct hci_cp_sniff_mode *cp;
1552 struct hci_conn *conn; 1542 struct hci_conn *conn;
1553 1543
1554 BT_DBG("%s status 0x%x", hdev->name, status); 1544 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555 1545
1556 if (!status) 1546 if (!status)
1557 return; 1547 return;
@@ -1578,7 +1568,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1578 struct hci_cp_exit_sniff_mode *cp; 1568 struct hci_cp_exit_sniff_mode *cp;
1579 struct hci_conn *conn; 1569 struct hci_conn *conn;
1580 1570
1581 BT_DBG("%s status 0x%x", hdev->name, status); 1571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 1572
1583 if (!status) 1573 if (!status)
1584 return; 1574 return;
@@ -1627,7 +1617,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1627 struct hci_cp_le_create_conn *cp; 1617 struct hci_cp_le_create_conn *cp;
1628 struct hci_conn *conn; 1618 struct hci_conn *conn;
1629 1619
1630 BT_DBG("%s status 0x%x", hdev->name, status); 1620 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1631 1621
1632 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 1622 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1633 if (!cp) 1623 if (!cp)
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1628 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1639 1629
1640 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1641 conn); 1631 conn);
1642 1632
1643 if (status) { 1633 if (status) {
1644 if (conn && conn->state == BT_CONNECT) { 1634 if (conn && conn->state == BT_CONNECT) {
@@ -1665,16 +1655,16 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1665 1655
1666static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 1656static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1667{ 1657{
1668 BT_DBG("%s status 0x%x", hdev->name, status); 1658 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669} 1659}
1670 1660
1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1661static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1672{ 1662{
1673 __u8 status = *((__u8 *) skb->data); 1663 __u8 status = *((__u8 *) skb->data);
1674 struct discovery_state *discov = &hdev->discovery; 1664 struct discovery_state *discov = &hdev->discovery;
1675 struct inquiry_entry *e; 1665 struct inquiry_entry *e;
1676 1666
1677 BT_DBG("%s status %d", hdev->name, status); 1667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1678 1668
1679 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1669 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1680 1670
@@ -1708,7 +1698,7 @@ unlock:
1708 hci_dev_unlock(hdev); 1698 hci_dev_unlock(hdev);
1709} 1699}
1710 1700
1711static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1701static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1712{ 1702{
1713 struct inquiry_data data; 1703 struct inquiry_data data;
1714 struct inquiry_info *info = (void *) (skb->data + 1); 1704 struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1745 hci_dev_unlock(hdev); 1735 hci_dev_unlock(hdev);
1746} 1736}
1747 1737
1748static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1738static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749{ 1739{
1750 struct hci_ev_conn_complete *ev = (void *) skb->data; 1740 struct hci_ev_conn_complete *ev = (void *) skb->data;
1751 struct hci_conn *conn; 1741 struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
1823 hci_conn_check_pending(hdev); 1813 hci_conn_check_pending(hdev);
1824} 1814}
1825 1815
1826static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1816static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1827{ 1817{
1828 struct hci_ev_conn_request *ev = (void *) skb->data; 1818 struct hci_ev_conn_request *ev = (void *) skb->data;
1829 int mask = hdev->link_mode; 1819 int mask = hdev->link_mode;
1830 1820
1831 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, 1821 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1832 batostr(&ev->bdaddr), ev->link_type); 1822 ev->link_type);
1833 1823
1834 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1824 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1835 1825
1836 if ((mask & HCI_LM_ACCEPT) && 1826 if ((mask & HCI_LM_ACCEPT) &&
1837 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1827 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1838 /* Connection accepted */ 1828 /* Connection accepted */
1839 struct inquiry_entry *ie; 1829 struct inquiry_entry *ie;
1840 struct hci_conn *conn; 1830 struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1845 if (ie) 1835 if (ie)
1846 memcpy(ie->data.dev_class, ev->dev_class, 3); 1836 memcpy(ie->data.dev_class, ev->dev_class, 3);
1847 1837
1848 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1838 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1839 &ev->bdaddr);
1849 if (!conn) { 1840 if (!conn) {
1850 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1841 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1851 if (!conn) { 1842 if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1878 bacpy(&cp.bdaddr, &ev->bdaddr); 1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1879 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1880 1871
1881 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1882 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1883 cp.max_latency = cpu_to_le16(0xffff); 1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1884 cp.content_format = cpu_to_le16(hdev->voice_setting); 1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1885 cp.retrans_effort = 0xff; 1876 cp.retrans_effort = 0xff;
1886 1877
@@ -1897,12 +1888,12 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1897 } 1888 }
1898} 1889}
1899 1890
1900static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1891static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901{ 1892{
1902 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1903 struct hci_conn *conn; 1894 struct hci_conn *conn;
1904 1895
1905 BT_DBG("%s status %d", hdev->name, ev->status); 1896 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1906 1897
1907 hci_dev_lock(hdev); 1898 hci_dev_lock(hdev);
1908 1899
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1914 conn->state = BT_CLOSED; 1905 conn->state = BT_CLOSED;
1915 1906
1916 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1917 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1918 if (ev->status != 0) 1909 if (ev->status != 0)
1919 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1920 conn->dst_type, ev->status); 1911 conn->dst_type, ev->status);
1921 else 1912 else
1922 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 1913 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1923 conn->dst_type); 1914 conn->dst_type);
@@ -1934,12 +1925,12 @@ unlock:
1934 hci_dev_unlock(hdev); 1925 hci_dev_unlock(hdev);
1935} 1926}
1936 1927
1937static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1928static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938{ 1929{
1939 struct hci_ev_auth_complete *ev = (void *) skb->data; 1930 struct hci_ev_auth_complete *ev = (void *) skb->data;
1940 struct hci_conn *conn; 1931 struct hci_conn *conn;
1941 1932
1942 BT_DBG("%s status %d", hdev->name, ev->status); 1933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1943 1934
1944 hci_dev_lock(hdev); 1935 hci_dev_lock(hdev);
1945 1936
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1949 1940
1950 if (!ev->status) { 1941 if (!ev->status) {
1951 if (!hci_conn_ssp_enabled(conn) && 1942 if (!hci_conn_ssp_enabled(conn) &&
1952 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 1943 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1953 BT_INFO("re-auth of legacy device is not possible."); 1944 BT_INFO("re-auth of legacy device is not possible.");
1954 } else { 1945 } else {
1955 conn->link_mode |= HCI_LM_AUTH; 1946 conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1969 cp.handle = ev->handle; 1960 cp.handle = ev->handle;
1970 cp.encrypt = 0x01; 1961 cp.encrypt = 0x01;
1971 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1962 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1972 &cp); 1963 &cp);
1973 } else { 1964 } else {
1974 conn->state = BT_CONNECTED; 1965 conn->state = BT_CONNECTED;
1975 hci_proto_connect_cfm(conn, ev->status); 1966 hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1989 cp.handle = ev->handle; 1980 cp.handle = ev->handle;
1990 cp.encrypt = 0x01; 1981 cp.encrypt = 0x01;
1991 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1982 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1992 &cp); 1983 &cp);
1993 } else { 1984 } else {
1994 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 1985 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1995 hci_encrypt_cfm(conn, ev->status, 0x00); 1986 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
2000 hci_dev_unlock(hdev); 1991 hci_dev_unlock(hdev);
2001} 1992}
2002 1993
2003static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1994static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2004{ 1995{
2005 struct hci_ev_remote_name *ev = (void *) skb->data; 1996 struct hci_ev_remote_name *ev = (void *) skb->data;
2006 struct hci_conn *conn; 1997 struct hci_conn *conn;
@@ -2039,12 +2030,12 @@ unlock:
2039 hci_dev_unlock(hdev); 2030 hci_dev_unlock(hdev);
2040} 2031}
2041 2032
2042static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2033static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043{ 2034{
2044 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2035 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2045 struct hci_conn *conn; 2036 struct hci_conn *conn;
2046 2037
2047 BT_DBG("%s status %d", hdev->name, ev->status); 2038 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2048 2039
2049 hci_dev_lock(hdev); 2040 hci_dev_lock(hdev);
2050 2041
@@ -2082,12 +2073,13 @@ unlock:
2082 hci_dev_unlock(hdev); 2073 hci_dev_unlock(hdev);
2083} 2074}
2084 2075
2085static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2076static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2077 struct sk_buff *skb)
2086{ 2078{
2087 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2079 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2088 struct hci_conn *conn; 2080 struct hci_conn *conn;
2089 2081
2090 BT_DBG("%s status %d", hdev->name, ev->status); 2082 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2091 2083
2092 hci_dev_lock(hdev); 2084 hci_dev_lock(hdev);
2093 2085
@@ -2104,12 +2096,13 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
2104 hci_dev_unlock(hdev); 2096 hci_dev_unlock(hdev);
2105} 2097}
2106 2098
2107static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2099static void hci_remote_features_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2108{ 2101{
2109 struct hci_ev_remote_features *ev = (void *) skb->data; 2102 struct hci_ev_remote_features *ev = (void *) skb->data;
2110 struct hci_conn *conn; 2103 struct hci_conn *conn;
2111 2104
2112 BT_DBG("%s status %d", hdev->name, ev->status); 2105 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2113 2106
2114 hci_dev_lock(hdev); 2107 hci_dev_lock(hdev);
2115 2108
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
2128 cp.handle = ev->handle; 2121 cp.handle = ev->handle;
2129 cp.page = 0x01; 2122 cp.page = 0x01;
2130 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 2123 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2131 sizeof(cp), &cp); 2124 sizeof(cp), &cp);
2132 goto unlock; 2125 goto unlock;
2133 } 2126 }
2134 2127
@@ -2153,17 +2146,18 @@ unlock:
2153 hci_dev_unlock(hdev); 2146 hci_dev_unlock(hdev);
2154} 2147}
2155 2148
2156static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) 2149static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2157{ 2150{
2158 BT_DBG("%s", hdev->name); 2151 BT_DBG("%s", hdev->name);
2159} 2152}
2160 2153
2161static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2154static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2155 struct sk_buff *skb)
2162{ 2156{
2163 BT_DBG("%s", hdev->name); 2157 BT_DBG("%s", hdev->name);
2164} 2158}
2165 2159
2166static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2160static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2167{ 2161{
2168 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2162 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2169 __u16 opcode; 2163 __u16 opcode;
@@ -2370,7 +2364,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2370 break; 2364 break;
2371 2365
2372 default: 2366 default:
2373 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2367 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2374 break; 2368 break;
2375 } 2369 }
2376 2370
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2384 } 2378 }
2385} 2379}
2386 2380
2387static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2381static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388{ 2382{
2389 struct hci_ev_cmd_status *ev = (void *) skb->data; 2383 struct hci_ev_cmd_status *ev = (void *) skb->data;
2390 __u16 opcode; 2384 __u16 opcode;
@@ -2451,7 +2445,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2451 break; 2445 break;
2452 2446
2453 default: 2447 default:
2454 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2448 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2455 break; 2449 break;
2456 } 2450 }
2457 2451
@@ -2465,12 +2459,12 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2465 } 2459 }
2466} 2460}
2467 2461
2468static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2462static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469{ 2463{
2470 struct hci_ev_role_change *ev = (void *) skb->data; 2464 struct hci_ev_role_change *ev = (void *) skb->data;
2471 struct hci_conn *conn; 2465 struct hci_conn *conn;
2472 2466
2473 BT_DBG("%s status %d", hdev->name, ev->status); 2467 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2474 2468
2475 hci_dev_lock(hdev); 2469 hci_dev_lock(hdev);
2476 2470
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2491 hci_dev_unlock(hdev); 2485 hci_dev_unlock(hdev);
2492} 2486}
2493 2487
2494static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2488static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{ 2489{
2496 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2490 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2497 int i; 2491 int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2502 } 2496 }
2503 2497
2504 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2498 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2505 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 2499 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2506 BT_DBG("%s bad parameters", hdev->name); 2500 BT_DBG("%s bad parameters", hdev->name);
2507 return; 2501 return;
2508 } 2502 }
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2557 queue_work(hdev->workqueue, &hdev->tx_work); 2551 queue_work(hdev->workqueue, &hdev->tx_work);
2558} 2552}
2559 2553
2560static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, 2554static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2561 struct sk_buff *skb)
2562{ 2555{
2563 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2556 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2564 int i; 2557 int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2569 } 2562 }
2570 2563
2571 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2564 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2572 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 2565 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2573 BT_DBG("%s bad parameters", hdev->name); 2566 BT_DBG("%s bad parameters", hdev->name);
2574 return; 2567 return;
2575 } 2568 }
2576 2569
2577 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 2570 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2578 ev->num_hndl); 2571 ev->num_hndl);
2579 2572
2580 for (i = 0; i < ev->num_hndl; i++) { 2573 for (i = 0; i < ev->num_hndl; i++) {
2581 struct hci_comp_blocks_info *info = &ev->handles[i]; 2574 struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,12 +2600,12 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2607 queue_work(hdev->workqueue, &hdev->tx_work); 2600 queue_work(hdev->workqueue, &hdev->tx_work);
2608} 2601}
2609 2602
2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2603static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611{ 2604{
2612 struct hci_ev_mode_change *ev = (void *) skb->data; 2605 struct hci_ev_mode_change *ev = (void *) skb->data;
2613 struct hci_conn *conn; 2606 struct hci_conn *conn;
2614 2607
2615 BT_DBG("%s status %d", hdev->name, ev->status); 2608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2616 2609
2617 hci_dev_lock(hdev); 2610 hci_dev_lock(hdev);
2618 2611
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2621 conn->mode = ev->mode; 2614 conn->mode = ev->mode;
2622 conn->interval = __le16_to_cpu(ev->interval); 2615 conn->interval = __le16_to_cpu(ev->interval);
2623 2616
2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { 2617 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2618 &conn->flags)) {
2625 if (conn->mode == HCI_CM_ACTIVE) 2619 if (conn->mode == HCI_CM_ACTIVE)
2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2620 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627 else 2621 else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2635 hci_dev_unlock(hdev); 2629 hci_dev_unlock(hdev);
2636} 2630}
2637 2631
2638static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2632static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639{ 2633{
2640 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2634 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2641 struct hci_conn *conn; 2635 struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2656 2650
2657 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 2651 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2658 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2652 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(ev->bdaddr), &ev->bdaddr); 2653 sizeof(ev->bdaddr), &ev->bdaddr);
2660 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 2654 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2661 u8 secure; 2655 u8 secure;
2662 2656
@@ -2672,7 +2666,7 @@ unlock:
2672 hci_dev_unlock(hdev); 2666 hci_dev_unlock(hdev);
2673} 2667}
2674 2668
2675static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2669static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2676{ 2670{
2677 struct hci_ev_link_key_req *ev = (void *) skb->data; 2671 struct hci_ev_link_key_req *ev = (void *) skb->data;
2678 struct hci_cp_link_key_reply cp; 2672 struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2689 key = hci_find_link_key(hdev, &ev->bdaddr); 2683 key = hci_find_link_key(hdev, &ev->bdaddr);
2690 if (!key) { 2684 if (!key) {
2691 BT_DBG("%s link key not found for %s", hdev->name, 2685 BT_DBG("%s link key not found for %s", hdev->name,
2692 batostr(&ev->bdaddr)); 2686 batostr(&ev->bdaddr));
2693 goto not_found; 2687 goto not_found;
2694 } 2688 }
2695 2689
2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2690 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2697 batostr(&ev->bdaddr)); 2691 batostr(&ev->bdaddr));
2698 2692
2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 2693 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2700 key->type == HCI_LK_DEBUG_COMBINATION) { 2694 key->type == HCI_LK_DEBUG_COMBINATION) {
2701 BT_DBG("%s ignoring debug key", hdev->name); 2695 BT_DBG("%s ignoring debug key", hdev->name);
2702 goto not_found; 2696 goto not_found;
2703 } 2697 }
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2705 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2699 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2706 if (conn) { 2700 if (conn) {
2707 if (key->type == HCI_LK_UNAUTH_COMBINATION && 2701 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2708 conn->auth_type != 0xff && 2702 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2709 (conn->auth_type & 0x01)) {
2710 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2703 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2711 goto not_found; 2704 goto not_found;
2712 } 2705 }
2713 2706
2714 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2707 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2715 conn->pending_sec_level == BT_SECURITY_HIGH) { 2708 conn->pending_sec_level == BT_SECURITY_HIGH) {
2716 BT_DBG("%s ignoring key unauthenticated for high \ 2709 BT_DBG("%s ignoring key unauthenticated for high security",
2717 security", hdev->name); 2710 hdev->name);
2718 goto not_found; 2711 goto not_found;
2719 } 2712 }
2720 2713
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2723 } 2716 }
2724 2717
2725 bacpy(&cp.bdaddr, &ev->bdaddr); 2718 bacpy(&cp.bdaddr, &ev->bdaddr);
2726 memcpy(cp.link_key, key->val, 16); 2719 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2727 2720
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2721 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2729 2722
@@ -2736,7 +2729,7 @@ not_found:
2736 hci_dev_unlock(hdev); 2729 hci_dev_unlock(hdev);
2737} 2730}
2738 2731
2739static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2732static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2740{ 2733{
2741 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2734 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2742 struct hci_conn *conn; 2735 struct hci_conn *conn;
@@ -2760,17 +2753,17 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2760 2753
2761 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) 2754 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2762 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2755 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2763 ev->key_type, pin_len); 2756 ev->key_type, pin_len);
2764 2757
2765 hci_dev_unlock(hdev); 2758 hci_dev_unlock(hdev);
2766} 2759}
2767 2760
2768static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 2761static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769{ 2762{
2770 struct hci_ev_clock_offset *ev = (void *) skb->data; 2763 struct hci_ev_clock_offset *ev = (void *) skb->data;
2771 struct hci_conn *conn; 2764 struct hci_conn *conn;
2772 2765
2773 BT_DBG("%s status %d", hdev->name, ev->status); 2766 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2774 2767
2775 hci_dev_lock(hdev); 2768 hci_dev_lock(hdev);
2776 2769
@@ -2788,12 +2781,12 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
2788 hci_dev_unlock(hdev); 2781 hci_dev_unlock(hdev);
2789} 2782}
2790 2783
2791static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2784static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792{ 2785{
2793 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 2786 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2794 struct hci_conn *conn; 2787 struct hci_conn *conn;
2795 2788
2796 BT_DBG("%s status %d", hdev->name, ev->status); 2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2797 2790
2798 hci_dev_lock(hdev); 2791 hci_dev_lock(hdev);
2799 2792
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
2804 hci_dev_unlock(hdev); 2797 hci_dev_unlock(hdev);
2805} 2798}
2806 2799
2807static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2800static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{ 2801{
2809 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2810 struct inquiry_entry *ie; 2803 struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
2822 hci_dev_unlock(hdev); 2815 hci_dev_unlock(hdev);
2823} 2816}
2824 2817
2825static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 2818static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2826{ 2820{
2827 struct inquiry_data data; 2821 struct inquiry_data data;
2828 int num_rsp = *((__u8 *) skb->data); 2822 int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2881 hci_dev_unlock(hdev); 2875 hci_dev_unlock(hdev);
2882} 2876}
2883 2877
2884static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2878static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2879 struct sk_buff *skb)
2885{ 2880{
2886 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2881 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2887 struct hci_conn *conn; 2882 struct hci_conn *conn;
@@ -2929,12 +2924,13 @@ unlock:
2929 hci_dev_unlock(hdev); 2924 hci_dev_unlock(hdev);
2930} 2925}
2931 2926
2932static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2927static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2928 struct sk_buff *skb)
2933{ 2929{
2934 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2930 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2935 struct hci_conn *conn; 2931 struct hci_conn *conn;
2936 2932
2937 BT_DBG("%s status %d", hdev->name, ev->status); 2933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2938 2934
2939 hci_dev_lock(hdev); 2935 hci_dev_lock(hdev);
2940 2936
@@ -2984,19 +2980,20 @@ unlock:
2984 hci_dev_unlock(hdev); 2980 hci_dev_unlock(hdev);
2985} 2981}
2986 2982
2987static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) 2983static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2988{ 2984{
2989 BT_DBG("%s", hdev->name); 2985 BT_DBG("%s", hdev->name);
2990} 2986}
2991 2987
2992static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2988static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2993{ 2989{
2994 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2990 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2995 2991
2996 BT_DBG("%s status %d", hdev->name, ev->status); 2992 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2997} 2993}
2998 2994
2999static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2995static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2996 struct sk_buff *skb)
3000{ 2997{
3001 struct inquiry_data data; 2998 struct inquiry_data data;
3002 struct extended_inquiry_info *info = (void *) (skb->data + 1); 2999 struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3049,7 +3046,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3049 struct hci_ev_key_refresh_complete *ev = (void *) skb->data; 3046 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3050 struct hci_conn *conn; 3047 struct hci_conn *conn;
3051 3048
3052 BT_DBG("%s status %u handle %u", hdev->name, ev->status, 3049 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3053 __le16_to_cpu(ev->handle)); 3050 __le16_to_cpu(ev->handle));
3054 3051
3055 hci_dev_lock(hdev); 3052 hci_dev_lock(hdev);
@@ -3087,7 +3084,7 @@ unlock:
3087 hci_dev_unlock(hdev); 3084 hci_dev_unlock(hdev);
3088} 3085}
3089 3086
3090static inline u8 hci_get_auth_req(struct hci_conn *conn) 3087static u8 hci_get_auth_req(struct hci_conn *conn)
3091{ 3088{
3092 /* If remote requests dedicated bonding follow that lead */ 3089 /* If remote requests dedicated bonding follow that lead */
3093 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { 3090 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3106,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
3106 return conn->auth_type; 3103 return conn->auth_type;
3107} 3104}
3108 3105
3109static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3106static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110{ 3107{
3111 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3108 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3112 struct hci_conn *conn; 3109 struct hci_conn *conn;
@@ -3125,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3125 goto unlock; 3122 goto unlock;
3126 3123
3127 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3124 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3128 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3125 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3129 struct hci_cp_io_capability_reply cp; 3126 struct hci_cp_io_capability_reply cp;
3130 3127
3131 bacpy(&cp.bdaddr, &ev->bdaddr); 3128 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3136,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3136 conn->auth_type = hci_get_auth_req(conn); 3133 conn->auth_type = hci_get_auth_req(conn);
3137 cp.authentication = conn->auth_type; 3134 cp.authentication = conn->auth_type;
3138 3135
3139 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && 3136 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3140 hci_find_remote_oob_data(hdev, &conn->dst)) 3137 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3141 cp.oob_data = 0x01; 3138 cp.oob_data = 0x01;
3142 else 3139 else
3143 cp.oob_data = 0x00; 3140 cp.oob_data = 0x00;
3144 3141
3145 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3142 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3146 sizeof(cp), &cp); 3143 sizeof(cp), &cp);
3147 } else { 3144 } else {
3148 struct hci_cp_io_capability_neg_reply cp; 3145 struct hci_cp_io_capability_neg_reply cp;
3149 3146
@@ -3151,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3151 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3148 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3152 3149
3153 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3150 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3154 sizeof(cp), &cp); 3151 sizeof(cp), &cp);
3155 } 3152 }
3156 3153
3157unlock: 3154unlock:
3158 hci_dev_unlock(hdev); 3155 hci_dev_unlock(hdev);
3159} 3156}
3160 3157
3161static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3158static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3162{ 3159{
3163 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3160 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3164 struct hci_conn *conn; 3161 struct hci_conn *conn;
@@ -3180,8 +3177,8 @@ unlock:
3180 hci_dev_unlock(hdev); 3177 hci_dev_unlock(hdev);
3181} 3178}
3182 3179
3183static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, 3180static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3184 struct sk_buff *skb) 3181 struct sk_buff *skb)
3185{ 3182{
3186 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3183 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3187 int loc_mitm, rem_mitm, confirm_hint = 0; 3184 int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3209,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3209 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { 3206 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3210 BT_DBG("Rejecting request: remote device can't provide MITM"); 3207 BT_DBG("Rejecting request: remote device can't provide MITM");
3211 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3208 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3212 sizeof(ev->bdaddr), &ev->bdaddr); 3209 sizeof(ev->bdaddr), &ev->bdaddr);
3213 goto unlock; 3210 goto unlock;
3214 } 3211 }
3215 3212
3216 /* If no side requires MITM protection; auto-accept */ 3213 /* If no side requires MITM protection; auto-accept */
3217 if ((!loc_mitm || conn->remote_cap == 0x03) && 3214 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3218 (!rem_mitm || conn->io_capability == 0x03)) { 3215 (!rem_mitm || conn->io_capability == 0x03)) {
3219 3216
3220 /* If we're not the initiators request authorization to 3217 /* If we're not the initiators request authorization to
3221 * proceed from user space (mgmt_user_confirm with 3218 * proceed from user space (mgmt_user_confirm with
@@ -3227,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3227 } 3224 }
3228 3225
3229 BT_DBG("Auto-accept of user confirmation with %ums delay", 3226 BT_DBG("Auto-accept of user confirmation with %ums delay",
3230 hdev->auto_accept_delay); 3227 hdev->auto_accept_delay);
3231 3228
3232 if (hdev->auto_accept_delay > 0) { 3229 if (hdev->auto_accept_delay > 0) {
3233 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3230 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3236,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3236 } 3233 }
3237 3234
3238 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3235 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3239 sizeof(ev->bdaddr), &ev->bdaddr); 3236 sizeof(ev->bdaddr), &ev->bdaddr);
3240 goto unlock; 3237 goto unlock;
3241 } 3238 }
3242 3239
@@ -3248,8 +3245,8 @@ unlock:
3248 hci_dev_unlock(hdev); 3245 hci_dev_unlock(hdev);
3249} 3246}
3250 3247
3251static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, 3248static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3252 struct sk_buff *skb) 3249 struct sk_buff *skb)
3253{ 3250{
3254 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3251 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3255 3252
@@ -3263,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3263 hci_dev_unlock(hdev); 3260 hci_dev_unlock(hdev);
3264} 3261}
3265 3262
3266static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3263static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3264 struct sk_buff *skb)
3267{ 3265{
3268 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 3266 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3269 struct hci_conn *conn; 3267 struct hci_conn *conn;
@@ -3291,7 +3289,8 @@ unlock:
3291 hci_dev_unlock(hdev); 3289 hci_dev_unlock(hdev);
3292} 3290}
3293 3291
3294static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 3292static void hci_remote_host_features_evt(struct hci_dev *hdev,
3293 struct sk_buff *skb)
3295{ 3294{
3296 struct hci_ev_remote_host_features *ev = (void *) skb->data; 3295 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3297 struct inquiry_entry *ie; 3296 struct inquiry_entry *ie;
@@ -3307,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
3307 hci_dev_unlock(hdev); 3306 hci_dev_unlock(hdev);
3308} 3307}
3309 3308
3310static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3309static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3311 struct sk_buff *skb) 3310 struct sk_buff *skb)
3312{ 3311{
3313 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3312 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3314 struct oob_data *data; 3313 struct oob_data *data;
@@ -3329,28 +3328,41 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3329 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); 3328 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3330 3329
3331 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), 3330 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3332 &cp); 3331 &cp);
3333 } else { 3332 } else {
3334 struct hci_cp_remote_oob_data_neg_reply cp; 3333 struct hci_cp_remote_oob_data_neg_reply cp;
3335 3334
3336 bacpy(&cp.bdaddr, &ev->bdaddr); 3335 bacpy(&cp.bdaddr, &ev->bdaddr);
3337 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), 3336 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3338 &cp); 3337 &cp);
3339 } 3338 }
3340 3339
3341unlock: 3340unlock:
3342 hci_dev_unlock(hdev); 3341 hci_dev_unlock(hdev);
3343} 3342}
3344 3343
3345static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3344static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3346{ 3345{
3347 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3346 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3348 struct hci_conn *conn; 3347 struct hci_conn *conn;
3349 3348
3350 BT_DBG("%s status %d", hdev->name, ev->status); 3349 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3351 3350
3352 hci_dev_lock(hdev); 3351 hci_dev_lock(hdev);
3353 3352
3353 if (ev->status) {
3354 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3355 if (!conn)
3356 goto unlock;
3357
3358 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3359 conn->dst_type, ev->status);
3360 hci_proto_connect_cfm(conn, ev->status);
3361 conn->state = BT_CLOSED;
3362 hci_conn_del(conn);
3363 goto unlock;
3364 }
3365
3354 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); 3366 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3355 if (!conn) { 3367 if (!conn) {
3356 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 3368 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3363,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3363 conn->dst_type = ev->bdaddr_type; 3375 conn->dst_type = ev->bdaddr_type;
3364 } 3376 }
3365 3377
3366 if (ev->status) {
3367 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3368 conn->dst_type, ev->status);
3369 hci_proto_connect_cfm(conn, ev->status);
3370 conn->state = BT_CLOSED;
3371 hci_conn_del(conn);
3372 goto unlock;
3373 }
3374
3375 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3378 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3376 mgmt_device_connected(hdev, &ev->bdaddr, conn->type, 3379 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3377 conn->dst_type, 0, NULL, 0, NULL); 3380 conn->dst_type, 0, NULL, 0, NULL);
@@ -3389,8 +3392,7 @@ unlock:
3389 hci_dev_unlock(hdev); 3392 hci_dev_unlock(hdev);
3390} 3393}
3391 3394
3392static inline void hci_le_adv_report_evt(struct hci_dev *hdev, 3395static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3393 struct sk_buff *skb)
3394{ 3396{
3395 u8 num_reports = skb->data[0]; 3397 u8 num_reports = skb->data[0];
3396 void *ptr = &skb->data[1]; 3398 void *ptr = &skb->data[1];
@@ -3411,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3411 hci_dev_unlock(hdev); 3413 hci_dev_unlock(hdev);
3412} 3414}
3413 3415
3414static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, 3416static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3415 struct sk_buff *skb)
3416{ 3417{
3417 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 3418 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3418 struct hci_cp_le_ltk_reply cp; 3419 struct hci_cp_le_ltk_reply cp;
@@ -3420,7 +3421,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3420 struct hci_conn *conn; 3421 struct hci_conn *conn;
3421 struct smp_ltk *ltk; 3422 struct smp_ltk *ltk;
3422 3423
3423 BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle)); 3424 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3424 3425
3425 hci_dev_lock(hdev); 3426 hci_dev_lock(hdev);
3426 3427
@@ -3455,7 +3456,7 @@ not_found:
3455 hci_dev_unlock(hdev); 3456 hci_dev_unlock(hdev);
3456} 3457}
3457 3458
3458static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 3459static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3459{ 3460{
3460 struct hci_ev_le_meta *le_ev = (void *) skb->data; 3461 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3461 3462
@@ -3644,7 +3645,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3644 break; 3645 break;
3645 3646
3646 default: 3647 default:
3647 BT_DBG("%s event 0x%x", hdev->name, event); 3648 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3648 break; 3649 break;
3649 } 3650 }
3650 3651
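The hci_event.c changes above are dominated by two mechanical cleanups: static event handlers lose their inline keyword, and BT_DBG calls move from 0x%x to fixed-width specifiers (0x%2.2x for one-byte status codes and events, 0x%4.4x for opcodes and connection handles). A minimal user-space sketch of what the width change buys, using plain printf rather than the kernel's BT_DBG macro and made-up values for illustration:

#include <stdio.h>

int main(void)
{
	unsigned char status = 0x05;	/* a one-byte HCI status code */
	unsigned short handle = 0x002a;	/* a 16-bit connection handle */

	/* Old format: digit count depends on the value, so log lines do not align. */
	printf("status 0x%x handle 0x%x\n", status, handle);

	/* Format used throughout the patch: always two or four hex digits. */
	printf("status 0x%2.2x handle 0x%4.4x\n", status, handle);

	return 0;
}

The old form prints "status 0x5 handle 0x2a", while the fixed-width form prints "status 0x05 handle 0x002a", which lines up with how the Bluetooth specification quotes status codes and connection handles.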
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426a..a7f04de03d79 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
24 24
25/* Bluetooth HCI sockets. */ 25/* Bluetooth HCI sockets. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/skbuff.h>
38#include <linux/workqueue.h>
39#include <linux/interrupt.h>
40#include <linux/compat.h>
41#include <linux/socket.h>
42#include <linux/ioctl.h>
43#include <net/sock.h>
44
45#include <linux/uaccess.h>
46#include <asm/unaligned.h> 28#include <asm/unaligned.h>
47 29
48#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
113 flt = &hci_pi(sk)->filter; 95 flt = &hci_pi(sk)->filter;
114 96
115 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 97 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
116 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) 98 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
99 &flt->type_mask))
117 continue; 100 continue;
118 101
119 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { 102 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
120 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); 103 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
121 104
122 if (!hci_test_bit(evt, &flt->event_mask)) 105 if (!hci_test_bit(evt, &flt->event_mask))
123 continue; 106 continue;
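
The filter test re-wrapped above decides per-socket delivery: the packet type must be enabled in the socket's type_mask and, for event packets, the event code's bit must also be set in the event mask. A hedged user-space sketch of that logic (struct layout and bit helpers are illustrative stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct flt {
	uint32_t type_mask;		/* bit per HCI packet type */
	uint32_t event_mask[2];		/* bit per filtered event code (0..63) */
};

static int flt_test_bit(int nr, const uint32_t *addr)
{
	return (addr[nr >> 5] >> (nr & 31)) & 1;
}

int main(void)
{
	struct flt flt = { .type_mask = 1u << 0x04 };	/* allow event packets */
	int pkt_type = 0x04;				/* HCI event packet */
	int evt = 0x05;					/* Disconnection Complete */

	flt.event_mask[evt >> 5] |= 1u << (evt & 31);	/* subscribe to it */

	if (flt_test_bit(pkt_type, &flt.type_mask) &&
	    flt_test_bit(evt, flt.event_mask))
		printf("deliver event 0x%2.2x to this socket\n", evt);
	return 0;
}
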
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
240 struct hci_mon_hdr *hdr; 223 struct hci_mon_hdr *hdr;
241 224
242 /* Create a private copy with headroom */ 225 /* Create a private copy with headroom */
243 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); 226 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
227 GFP_ATOMIC);
244 if (!skb_copy) 228 if (!skb_copy)
245 continue; 229 continue;
246 230
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
495} 479}
496 480
497/* Ioctls that require bound socket */ 481/* Ioctls that require bound socket */
498static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) 482static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
483 unsigned long arg)
499{ 484{
500 struct hci_dev *hdev = hci_pi(sk)->hdev; 485 struct hci_dev *hdev = hci_pi(sk)->hdev;
501 486
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
540 } 525 }
541} 526}
542 527
543static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 528static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
529 unsigned long arg)
544{ 530{
545 struct sock *sk = sock->sk; 531 struct sock *sk = sock->sk;
546 void __user *argp = (void __user *) arg; 532 void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
601 } 587 }
602} 588}
603 589
604static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 590static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
591 int addr_len)
605{ 592{
606 struct sockaddr_hci haddr; 593 struct sockaddr_hci haddr;
607 struct sock *sk = sock->sk; 594 struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
690 return err; 677 return err;
691} 678}
692 679
693static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) 680static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
681 int *addr_len, int peer)
694{ 682{
695 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; 683 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
696 struct sock *sk = sock->sk; 684 struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
711 return 0; 699 return 0;
712} 700}
713 701
714static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 702static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
703 struct sk_buff *skb)
715{ 704{
716 __u32 mask = hci_pi(sk)->cmsg_mask; 705 __u32 mask = hci_pi(sk)->cmsg_mask;
717 706
718 if (mask & HCI_CMSG_DIR) { 707 if (mask & HCI_CMSG_DIR) {
719 int incoming = bt_cb(skb)->incoming; 708 int incoming = bt_cb(skb)->incoming;
720 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); 709 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
710 &incoming);
721 } 711 }
722 712
723 if (mask & HCI_CMSG_TSTAMP) { 713 if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
747} 737}
748 738
749static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 739static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
750 struct msghdr *msg, size_t len, int flags) 740 struct msghdr *msg, size_t len, int flags)
751{ 741{
752 int noblock = flags & MSG_DONTWAIT; 742 int noblock = flags & MSG_DONTWAIT;
753 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
857 u16 ocf = hci_opcode_ocf(opcode); 847 u16 ocf = hci_opcode_ocf(opcode);
858 848
859 if (((ogf > HCI_SFLT_MAX_OGF) || 849 if (((ogf > HCI_SFLT_MAX_OGF) ||
860 !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && 850 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
861 !capable(CAP_NET_RAW)) { 851 &hci_sec_filter.ocf_mask[ogf])) &&
852 !capable(CAP_NET_RAW)) {
862 err = -EPERM; 853 err = -EPERM;
863 goto drop; 854 goto drop;
864 } 855 }
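
The sendmsg hunk above vets raw HCI commands by their opcode group (OGF) and command field (OCF). An HCI opcode is a 16-bit value with the OGF in the top 6 bits and the OCF in the low 10 bits; a small stand-alone sketch of that split (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint16_t opcode_ogf(uint16_t op) { return op >> 10; }
static uint16_t opcode_ocf(uint16_t op) { return op & 0x03ff; }

int main(void)
{
	uint16_t opcode = 0x0c03;	/* HCI_Reset: OGF 0x03, OCF 0x0003 */

	printf("opcode 0x%4.4x -> ogf 0x%2.2x ocf 0x%4.4x\n",
	       opcode, opcode_ogf(opcode), opcode_ocf(opcode));
	return 0;
}
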
@@ -891,7 +882,8 @@ drop:
891 goto done; 882 goto done;
892} 883}
893 884
894static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) 885static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
886 char __user *optval, unsigned int len)
895{ 887{
896 struct hci_ufilter uf = { .opcode = 0 }; 888 struct hci_ufilter uf = { .opcode = 0 };
897 struct sock *sk = sock->sk; 889 struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
973 return err; 965 return err;
974} 966}
975 967
976static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 968static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
969 char __user *optval, int __user *optlen)
977{ 970{
978 struct hci_ufilter uf; 971 struct hci_ufilter uf;
979 struct sock *sk = sock->sk; 972 struct sock *sk = sock->sk;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eafa..a20e61c3653d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/kernel.h>
4#include <linux/slab.h>
5#include <linux/init.h>
6#include <linux/debugfs.h> 3#include <linux/debugfs.h>
7#include <linux/seq_file.h>
8#include <linux/module.h> 4#include <linux/module.h>
9 5
10#include <net/bluetooth/bluetooth.h> 6#include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
31 } 27 }
32} 28}
33 29
34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 30static ssize_t show_link_type(struct device *dev,
31 struct device_attribute *attr, char *buf)
35{ 32{
36 struct hci_conn *conn = to_hci_conn(dev); 33 struct hci_conn *conn = to_hci_conn(dev);
37 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 34 return sprintf(buf, "%s\n", link_typetostr(conn->type));
38} 35}
39 36
40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 37static ssize_t show_link_address(struct device *dev,
38 struct device_attribute *attr, char *buf)
41{ 39{
42 struct hci_conn *conn = to_hci_conn(dev); 40 struct hci_conn *conn = to_hci_conn(dev);
43 return sprintf(buf, "%s\n", batostr(&conn->dst)); 41 return sprintf(buf, "%s\n", batostr(&conn->dst));
44} 42}
45 43
46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 44static ssize_t show_link_features(struct device *dev,
45 struct device_attribute *attr, char *buf)
47{ 46{
48 struct hci_conn *conn = to_hci_conn(dev); 47 struct hci_conn *conn = to_hci_conn(dev);
49 48
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 49 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 50 conn->features[0], conn->features[1],
52 conn->features[2], conn->features[3], 51 conn->features[2], conn->features[3],
53 conn->features[4], conn->features[5], 52 conn->features[4], conn->features[5],
54 conn->features[6], conn->features[7]); 53 conn->features[6], conn->features[7]);
55} 54}
56 55
57#define LINK_ATTR(_name, _mode, _show, _store) \ 56#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
185 } 184 }
186} 185}
187 186
188static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 187static ssize_t show_bus(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{ 189{
190 struct hci_dev *hdev = to_hci_dev(dev); 190 struct hci_dev *hdev = to_hci_dev(dev);
191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
192} 192}
193 193
194static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 194static ssize_t show_type(struct device *dev,
195 struct device_attribute *attr, char *buf)
195{ 196{
196 struct hci_dev *hdev = to_hci_dev(dev); 197 struct hci_dev *hdev = to_hci_dev(dev);
197 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); 198 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
198} 199}
199 200
200static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 201static ssize_t show_name(struct device *dev,
202 struct device_attribute *attr, char *buf)
201{ 203{
202 struct hci_dev *hdev = to_hci_dev(dev); 204 struct hci_dev *hdev = to_hci_dev(dev);
203 char name[HCI_MAX_NAME_LENGTH + 1]; 205 char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
210 return sprintf(buf, "%s\n", name); 212 return sprintf(buf, "%s\n", name);
211} 213}
212 214
213static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 215static ssize_t show_class(struct device *dev,
216 struct device_attribute *attr, char *buf)
214{ 217{
215 struct hci_dev *hdev = to_hci_dev(dev); 218 struct hci_dev *hdev = to_hci_dev(dev);
216 return sprintf(buf, "0x%.2x%.2x%.2x\n", 219 return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
217 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 220 hdev->dev_class[1], hdev->dev_class[0]);
218} 221}
219 222
220static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 223static ssize_t show_address(struct device *dev,
224 struct device_attribute *attr, char *buf)
221{ 225{
222 struct hci_dev *hdev = to_hci_dev(dev); 226 struct hci_dev *hdev = to_hci_dev(dev);
223 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 227 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
224} 228}
225 229
226static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 230static ssize_t show_features(struct device *dev,
231 struct device_attribute *attr, char *buf)
227{ 232{
228 struct hci_dev *hdev = to_hci_dev(dev); 233 struct hci_dev *hdev = to_hci_dev(dev);
229 234
230 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 235 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
231 hdev->features[0], hdev->features[1], 236 hdev->features[0], hdev->features[1],
232 hdev->features[2], hdev->features[3], 237 hdev->features[2], hdev->features[3],
233 hdev->features[4], hdev->features[5], 238 hdev->features[4], hdev->features[5],
234 hdev->features[6], hdev->features[7]); 239 hdev->features[6], hdev->features[7]);
235} 240}
236 241
237static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 242static ssize_t show_manufacturer(struct device *dev,
243 struct device_attribute *attr, char *buf)
238{ 244{
239 struct hci_dev *hdev = to_hci_dev(dev); 245 struct hci_dev *hdev = to_hci_dev(dev);
240 return sprintf(buf, "%d\n", hdev->manufacturer); 246 return sprintf(buf, "%d\n", hdev->manufacturer);
241} 247}
242 248
243static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 249static ssize_t show_hci_version(struct device *dev,
250 struct device_attribute *attr, char *buf)
244{ 251{
245 struct hci_dev *hdev = to_hci_dev(dev); 252 struct hci_dev *hdev = to_hci_dev(dev);
246 return sprintf(buf, "%d\n", hdev->hci_ver); 253 return sprintf(buf, "%d\n", hdev->hci_ver);
247} 254}
248 255
249static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 256static ssize_t show_hci_revision(struct device *dev,
257 struct device_attribute *attr, char *buf)
250{ 258{
251 struct hci_dev *hdev = to_hci_dev(dev); 259 struct hci_dev *hdev = to_hci_dev(dev);
252 return sprintf(buf, "%d\n", hdev->hci_rev); 260 return sprintf(buf, "%d\n", hdev->hci_rev);
253} 261}
254 262
255static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 263static ssize_t show_idle_timeout(struct device *dev,
264 struct device_attribute *attr, char *buf)
256{ 265{
257 struct hci_dev *hdev = to_hci_dev(dev); 266 struct hci_dev *hdev = to_hci_dev(dev);
258 return sprintf(buf, "%d\n", hdev->idle_timeout); 267 return sprintf(buf, "%d\n", hdev->idle_timeout);
259} 268}
260 269
261static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 270static ssize_t store_idle_timeout(struct device *dev,
271 struct device_attribute *attr,
272 const char *buf, size_t count)
262{ 273{
263 struct hci_dev *hdev = to_hci_dev(dev); 274 struct hci_dev *hdev = to_hci_dev(dev);
264 unsigned int val; 275 unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
276 return count; 287 return count;
277} 288}
278 289
279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 290static ssize_t show_sniff_max_interval(struct device *dev,
291 struct device_attribute *attr, char *buf)
280{ 292{
281 struct hci_dev *hdev = to_hci_dev(dev); 293 struct hci_dev *hdev = to_hci_dev(dev);
282 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 294 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
283} 295}
284 296
285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 297static ssize_t store_sniff_max_interval(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
286{ 300{
287 struct hci_dev *hdev = to_hci_dev(dev); 301 struct hci_dev *hdev = to_hci_dev(dev);
288 u16 val; 302 u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
300 return count; 314 return count;
301} 315}
302 316
303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 317static ssize_t show_sniff_min_interval(struct device *dev,
318 struct device_attribute *attr, char *buf)
304{ 319{
305 struct hci_dev *hdev = to_hci_dev(dev); 320 struct hci_dev *hdev = to_hci_dev(dev);
306 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 321 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
307} 322}
308 323
309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 324static ssize_t store_sniff_min_interval(struct device *dev,
325 struct device_attribute *attr,
326 const char *buf, size_t count)
310{ 327{
311 struct hci_dev *hdev = to_hci_dev(dev); 328 struct hci_dev *hdev = to_hci_dev(dev);
312 u16 val; 329 u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
335static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 352static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
336 353
337static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 354static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
338 show_idle_timeout, store_idle_timeout); 355 show_idle_timeout, store_idle_timeout);
339static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
340 show_sniff_max_interval, store_sniff_max_interval); 357 show_sniff_max_interval, store_sniff_max_interval);
341static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, 358static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
342 show_sniff_min_interval, store_sniff_min_interval); 359 show_sniff_min_interval, store_sniff_min_interval);
343 360
344static struct attribute *bt_host_attrs[] = { 361static struct attribute *bt_host_attrs[] = {
345 &dev_attr_bus.attr, 362 &dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
455 memcpy(&data5, &uuid[14], 2); 472 memcpy(&data5, &uuid[14], 2);
456 473
457 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
458 ntohl(data0), ntohs(data1), ntohs(data2), 475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
459 ntohs(data3), ntohl(data4), ntohs(data5)); 476 ntohl(data4), ntohs(data5));
460} 477}
461 478
462static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
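
print_bt_uuid() above formats a 128-bit UUID stored in big-endian byte order as the usual 8-4-4-4-12 string, byte-swapping each chunk with ntohl()/ntohs(). A stand-alone sketch with the same layout (the sample UUID is only illustrative input):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void print_uuid(const uint8_t *uuid)
{
	uint32_t d0, d4;
	uint16_t d1, d2, d3, d5;

	memcpy(&d0, &uuid[0], 4);
	memcpy(&d1, &uuid[4], 2);
	memcpy(&d2, &uuid[6], 2);
	memcpy(&d3, &uuid[8], 2);
	memcpy(&d4, &uuid[10], 4);
	memcpy(&d5, &uuid[14], 2);

	printf("%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
	       ntohl(d0), ntohs(d1), ntohs(d2), ntohs(d3),
	       ntohl(d4), ntohs(d5));
}

int main(void)
{
	/* Serial Port Profile UUID, used here only as sample input */
	uint8_t spp[16] = { 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x10, 0x00,
			    0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb };

	print_uuid(spp);	/* 00001101-0000-1000-8000-00805f9b34fb */
	return 0;
}
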
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
513} 530}
514 531
515DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 532DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
516 auto_accept_delay_set, "%llu\n"); 533 auto_accept_delay_set, "%llu\n");
517 534
518void hci_init_sysfs(struct hci_dev *hdev) 535void hci_init_sysfs(struct hci_dev *hdev)
519{ 536{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
547 return 0; 564 return 0;
548 565
549 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 566 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
550 hdev, &inquiry_cache_fops); 567 hdev, &inquiry_cache_fops);
551 568
552 debugfs_create_file("blacklist", 0444, hdev->debugfs, 569 debugfs_create_file("blacklist", 0444, hdev->debugfs,
553 hdev, &blacklist_fops); 570 hdev, &blacklist_fops);
554 571
555 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 572 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
556 573
557 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, 574 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
558 &auto_accept_delay_fops); 575 &auto_accept_delay_fops);
559 return 0; 576 return 0;
560} 577}
561 578
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b394..ccd985da6518 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24
25#include <linux/types.h>
26#include <linux/errno.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/poll.h>
31#include <linux/freezer.h>
32#include <linux/fcntl.h>
33#include <linux/skbuff.h>
34#include <linux/socket.h>
35#include <linux/ioctl.h>
36#include <linux/file.h> 24#include <linux/file.h>
37#include <linux/init.h>
38#include <linux/wait.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 25#include <linux/kthread.h>
41#include <net/sock.h>
42
43#include <linux/input.h>
44#include <linux/hid.h>
45#include <linux/hidraw.h> 26#include <linux/hidraw.h>
46 27
47#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
244} 225}
245 226
246static int __hidp_send_ctrl_message(struct hidp_session *session, 227static int __hidp_send_ctrl_message(struct hidp_session *session,
247 unsigned char hdr, unsigned char *data, int size) 228 unsigned char hdr, unsigned char *data,
229 int size)
248{ 230{
249 struct sk_buff *skb; 231 struct sk_buff *skb;
250 232
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
268 return 0; 250 return 0;
269} 251}
270 252
271static inline int hidp_send_ctrl_message(struct hidp_session *session, 253static int hidp_send_ctrl_message(struct hidp_session *session,
272 unsigned char hdr, unsigned char *data, int size) 254 unsigned char hdr, unsigned char *data, int size)
273{ 255{
274 int err; 256 int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
471 mod_timer(&session->timer, jiffies + HZ * session->idle_to); 453 mod_timer(&session->timer, jiffies + HZ * session->idle_to);
472} 454}
473 455
474static inline void hidp_del_timer(struct hidp_session *session) 456static void hidp_del_timer(struct hidp_session *session)
475{ 457{
476 if (session->idle_to > 0) 458 if (session->idle_to > 0)
477 del_timer(&session->timer); 459 del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1f..18b3f6892a36 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/export.h>
24
25#include <linux/types.h>
26#include <linux/capability.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/poll.h>
30#include <linux/fcntl.h>
31#include <linux/skbuff.h>
32#include <linux/socket.h>
33#include <linux/ioctl.h>
34#include <linux/file.h> 24#include <linux/file.h>
35#include <linux/init.h>
36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h>
39 25
40#include "hidp.h" 26#include "hidp.h"
41 27
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4554e80d16a3..a8964db04bfb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32 32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h> 33#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h> 34#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54 35
55#include <net/bluetooth/bluetooth.h> 36#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h> 37#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h> 38#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h> 39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
59 41
60bool disable_ertm; 42bool disable_ertm;
61 43
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn, 55static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err); 56 struct l2cap_chan *chan, int err);
75 57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
76/* ---- L2CAP channels ---- */ 61/* ---- L2CAP channels ---- */
77 62
78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
196 state_to_string(state)); 181 state_to_string(state));
197 182
198 chan->state = state; 183 chan->state = state;
199 chan->ops->state_change(chan->data, state); 184 chan->ops->state_change(chan, state);
200} 185}
201 186
202static void l2cap_state_change(struct l2cap_chan *chan, int state) 187static void l2cap_state_change(struct l2cap_chan *chan, int state)
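
Several hunks in this file change the channel callbacks from taking the opaque chan->data pointer to taking the l2cap_chan itself (state_change, close, new_connection, teardown, ready), so non-socket users such as the A2MP fixed channel can implement the ops without a struct sock behind chan->data. A hedged user-space sketch of that ops-table shape (all names below are illustrative stand-ins):

#include <stdio.h>

struct chan;

struct chan_ops {
	void (*state_change)(struct chan *chan, int state);
};

struct chan {
	const struct chan_ops *ops;
	int state;
	void *data;		/* still available to socket-backed users */
};

static void sock_state_change(struct chan *chan, int state)
{
	printf("chan %p: state -> %d (sk %p)\n",
	       (void *)chan, state, chan->data);
}

static const struct chan_ops sock_ops = { .state_change = sock_state_change };

int main(void)
{
	struct chan c = { .ops = &sock_ops };

	c.state = 1;
	c.ops->state_change(&c, c.state);  /* was ops->state_change(c.data, state) */
	return 0;
}
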
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
224 release_sock(sk); 209 release_sock(sk);
225} 210}
226 211
212static void __set_retrans_timer(struct l2cap_chan *chan)
213{
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
218 }
219}
220
221static void __set_monitor_timer(struct l2cap_chan *chan)
222{
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
227 }
228}
229
230static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232{
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241}
242
227/* ---- L2CAP sequence number lists ---- */ 243/* ---- L2CAP sequence number lists ---- */
228 244
229/* For ERTM, ordered lists of sequence numbers must be tracked for 245/* For ERTM, ordered lists of sequence numbers must be tracked for
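
The new ERTM helpers above, like the sequence lists this comment introduces, operate on transmit sequence numbers that wrap at the control-field size (64 for the enhanced control field, 16384 for the extended one). A small stand-alone sketch of that modular arithmetic (helper names and argument order are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

static uint16_t next_seq(uint16_t seq, uint16_t max_seq)
{
	return (seq + 1) % (max_seq + 1);
}

static uint16_t seq_offset(uint16_t from, uint16_t to, uint16_t max_seq)
{
	/* how many frames lie between "from" and "to", allowing wrap */
	return (to >= from) ? to - from : (max_seq + 1) - from + to;
}

int main(void)
{
	uint16_t max_seq = 63;	/* enhanced control field: 6-bit TxSeq */

	printf("next after 63 -> %u\n", next_seq(63, max_seq));	/* 0 */
	printf("offset 60..2  -> %u\n", seq_offset(60, 2, max_seq));	/* 6 */
	return 0;
}
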
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
366 382
367 l2cap_chan_unlock(chan); 383 l2cap_chan_unlock(chan);
368 384
369 chan->ops->close(chan->data); 385 chan->ops->close(chan);
370 mutex_unlock(&conn->chan_lock); 386 mutex_unlock(&conn->chan_lock);
371 387
372 l2cap_chan_put(chan); 388 l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
392 408
393 atomic_set(&chan->refcnt, 1); 409 atomic_set(&chan->refcnt, 1);
394 410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
395 BT_DBG("chan %p", chan); 414 BT_DBG("chan %p", chan);
396 415
397 return chan; 416 return chan;
@@ -412,6 +431,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; 432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW; 435 chan->sec_level = BT_SECURITY_LOW;
416 436
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 437 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -430,7 +450,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
430 case L2CAP_CHAN_CONN_ORIENTED: 450 case L2CAP_CHAN_CONN_ORIENTED:
431 if (conn->hcon->type == LE_LINK) { 451 if (conn->hcon->type == LE_LINK) {
432 /* LE connection */ 452 /* LE connection */
433 chan->omtu = L2CAP_LE_DEFAULT_MTU; 453 chan->omtu = L2CAP_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA; 454 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA; 455 chan->dcid = L2CAP_CID_LE_DATA;
436 } else { 456 } else {
@@ -447,6 +467,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
447 chan->omtu = L2CAP_DEFAULT_MTU; 467 chan->omtu = L2CAP_DEFAULT_MTU;
448 break; 468 break;
449 469
470 case L2CAP_CHAN_CONN_FIX_A2MP:
471 chan->scid = L2CAP_CID_A2MP;
472 chan->dcid = L2CAP_CID_A2MP;
473 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
474 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
475 break;
476
450 default: 477 default:
451 /* Raw socket can send/recv signalling messages only */ 478 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING; 479 chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +493,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
466 list_add(&chan->list, &conn->chan_l); 493 list_add(&chan->list, &conn->chan_l);
467} 494}
468 495
469static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 496void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
470{ 497{
471 mutex_lock(&conn->chan_lock); 498 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan); 499 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock); 500 mutex_unlock(&conn->chan_lock);
474} 501}
475 502
476static void l2cap_chan_del(struct l2cap_chan *chan, int err) 503void l2cap_chan_del(struct l2cap_chan *chan, int err)
477{ 504{
478 struct sock *sk = chan->sk;
479 struct l2cap_conn *conn = chan->conn; 505 struct l2cap_conn *conn = chan->conn;
480 struct sock *parent = bt_sk(sk)->parent;
481 506
482 __clear_chan_timer(chan); 507 __clear_chan_timer(chan);
483 508
@@ -490,34 +515,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
490 l2cap_chan_put(chan); 515 l2cap_chan_put(chan);
491 516
492 chan->conn = NULL; 517 chan->conn = NULL;
493 hci_conn_put(conn->hcon);
494 }
495
496 lock_sock(sk);
497
498 __l2cap_state_change(chan, BT_CLOSED);
499 sock_set_flag(sk, SOCK_ZAPPED);
500
501 if (err)
502 __l2cap_chan_set_err(chan, err);
503 518
504 if (parent) { 519 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
505 bt_accept_unlink(sk); 520 hci_conn_put(conn->hcon);
506 parent->sk_data_ready(parent, 0); 521 }
507 } else
508 sk->sk_state_change(sk);
509 522
510 release_sock(sk); 523 if (chan->ops->teardown)
524 chan->ops->teardown(chan, err);
511 525
512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && 526 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
514 return; 527 return;
515 528
516 skb_queue_purge(&chan->tx_q); 529 switch(chan->mode) {
517 530 case L2CAP_MODE_BASIC:
518 if (chan->mode == L2CAP_MODE_ERTM) { 531 break;
519 struct srej_list *l, *tmp;
520 532
533 case L2CAP_MODE_ERTM:
521 __clear_retrans_timer(chan); 534 __clear_retrans_timer(chan);
522 __clear_monitor_timer(chan); 535 __clear_monitor_timer(chan);
523 __clear_ack_timer(chan); 536 __clear_ack_timer(chan);
@@ -526,30 +539,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
526 539
527 l2cap_seq_list_free(&chan->srej_list); 540 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list); 541 l2cap_seq_list_free(&chan->retrans_list);
529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
530 list_del(&l->list);
531 kfree(l);
532 }
533 }
534}
535 542
536static void l2cap_chan_cleanup_listen(struct sock *parent) 543 /* fall through */
537{
538 struct sock *sk;
539
540 BT_DBG("parent %p", parent);
541
542 /* Close not yet accepted channels */
543 while ((sk = bt_accept_dequeue(parent, NULL))) {
544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
545
546 l2cap_chan_lock(chan);
547 __clear_chan_timer(chan);
548 l2cap_chan_close(chan, ECONNRESET);
549 l2cap_chan_unlock(chan);
550 544
551 chan->ops->close(chan->data); 545 case L2CAP_MODE_STREAMING:
546 skb_queue_purge(&chan->tx_q);
547 break;
552 } 548 }
549
550 return;
553} 551}
554 552
555void l2cap_chan_close(struct l2cap_chan *chan, int reason) 553void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +560,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 560
563 switch (chan->state) { 561 switch (chan->state) {
564 case BT_LISTEN: 562 case BT_LISTEN:
565 lock_sock(sk); 563 if (chan->ops->teardown)
566 l2cap_chan_cleanup_listen(sk); 564 chan->ops->teardown(chan, 0);
567
568 __l2cap_state_change(chan, BT_CLOSED);
569 sock_set_flag(sk, SOCK_ZAPPED);
570 release_sock(sk);
571 break; 565 break;
572 566
573 case BT_CONNECTED: 567 case BT_CONNECTED:
@@ -595,7 +589,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
595 rsp.scid = cpu_to_le16(chan->dcid); 589 rsp.scid = cpu_to_le16(chan->dcid);
596 rsp.dcid = cpu_to_le16(chan->scid); 590 rsp.dcid = cpu_to_le16(chan->scid);
597 rsp.result = cpu_to_le16(result); 591 rsp.result = cpu_to_le16(result);
598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 592 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
599 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 593 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
600 sizeof(rsp), &rsp); 594 sizeof(rsp), &rsp);
601 } 595 }
@@ -609,9 +603,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
609 break; 603 break;
610 604
611 default: 605 default:
612 lock_sock(sk); 606 if (chan->ops->teardown)
613 sock_set_flag(sk, SOCK_ZAPPED); 607 chan->ops->teardown(chan, 0);
614 release_sock(sk);
615 break; 608 break;
616 } 609 }
617} 610}
@@ -627,7 +620,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
627 default: 620 default:
628 return HCI_AT_NO_BONDING; 621 return HCI_AT_NO_BONDING;
629 } 622 }
630 } else if (chan->psm == cpu_to_le16(0x0001)) { 623 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
631 if (chan->sec_level == BT_SECURITY_LOW) 624 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP; 625 chan->sec_level = BT_SECURITY_SDP;
633 626
@@ -773,9 +766,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 766 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data), 767 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control); 768 &bt_cb(skb)->control);
769 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
776 } else { 770 } else {
777 __unpack_enhanced_control(get_unaligned_le16(skb->data), 771 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control); 772 &bt_cb(skb)->control);
773 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
779 } 774 }
780} 775}
781 776
@@ -830,66 +825,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
830 } 825 }
831} 826}
832 827
833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 828static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
834{ 829{
835 struct sk_buff *skb;
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
838 int count, hlen;
839
840 if (chan->state != BT_CONNECTED)
841 return;
842
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE; 831 return L2CAP_EXT_HDR_SIZE;
845 else 832 else
846 hlen = L2CAP_ENH_HDR_SIZE; 833 return L2CAP_ENH_HDR_SIZE;
834}
835
836static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
837 u32 control)
838{
839 struct sk_buff *skb;
840 struct l2cap_hdr *lh;
841 int hlen = __ertm_hdr_size(chan);
847 842
848 if (chan->fcs == L2CAP_FCS_CRC16) 843 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE; 844 hlen += L2CAP_FCS_SIZE;
850 845
851 BT_DBG("chan %p, control 0x%8.8x", chan, control); 846 skb = bt_skb_alloc(hlen, GFP_KERNEL);
852
853 count = min_t(unsigned int, conn->mtu, hlen);
854 847
855 control |= __set_sframe(chan);
856
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
859
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
862
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
864 if (!skb) 848 if (!skb)
865 return; 849 return ERR_PTR(-ENOMEM);
866 850
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 852 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid); 853 lh->cid = cpu_to_le16(chan->dcid);
870 854
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 855 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
856 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
857 else
858 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
872 859
873 if (chan->fcs == L2CAP_FCS_CRC16) { 860 if (chan->fcs == L2CAP_FCS_CRC16) {
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); 861 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); 862 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
876 } 863 }
877 864
878 skb->priority = HCI_PRIO_MAX; 865 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb); 866 return skb;
880} 867}
881 868
882static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) 869static void l2cap_send_sframe(struct l2cap_chan *chan,
870 struct l2cap_ctrl *control)
883{ 871{
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 872 struct sk_buff *skb;
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 873 u32 control_field;
874
875 BT_DBG("chan %p, control %p", chan, control);
876
877 if (!control->sframe)
878 return;
879
880 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
881 !control->poll)
882 control->final = 1;
883
884 if (control->super == L2CAP_SUPER_RR)
885 clear_bit(CONN_RNR_SENT, &chan->conn_state);
886 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state); 887 set_bit(CONN_RNR_SENT, &chan->conn_state);
887 } else
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
889 888
890 control |= __set_reqseq(chan, chan->buffer_seq); 889 if (control->super != L2CAP_SUPER_SREJ) {
890 chan->last_acked_seq = control->reqseq;
891 __clear_ack_timer(chan);
892 }
891 893
892 l2cap_send_sframe(chan, control); 894 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
895 control->final, control->poll, control->super);
896
897 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
898 control_field = __pack_extended_control(control);
899 else
900 control_field = __pack_enhanced_control(control);
901
902 skb = l2cap_create_sframe_pdu(chan, control_field);
903 if (!IS_ERR(skb))
904 l2cap_do_send(chan, skb);
905}
906
907static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
908{
909 struct l2cap_ctrl control;
910
911 BT_DBG("chan %p, poll %d", chan, poll);
912
913 memset(&control, 0, sizeof(control));
914 control.sframe = 1;
915 control.poll = poll;
916
917 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
918 control.super = L2CAP_SUPER_RNR;
919 else
920 control.super = L2CAP_SUPER_RR;
921
922 control.reqseq = chan->buffer_seq;
923 l2cap_send_sframe(chan, &control);
893} 924}
894 925
895static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 926static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
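
l2cap_create_sframe_pdu() above builds an S-frame as: basic L2CAP header (little-endian length and destination CID), a 16- or 32-bit control field, and, when FCS is enabled, a CRC-16 computed over everything so far and appended little-endian. A hedged user-space sketch of the enhanced-control (16-bit) case; the CRC routine assumes the same reflected 0xA001 polynomial as lib/crc16, and the CID/control values are made up:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* bitwise CRC-16, reflected polynomial 0xA001, initial value 0 */
static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xa001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	uint8_t pdu[8];
	uint16_t dcid = 0x0040;		/* example dynamic CID */
	uint16_t control = 0x0101;	/* example S-frame control field */

	/* basic header: length covers control + FCS, then the CID */
	put_le16(2 + 2, &pdu[0]);
	put_le16(dcid, &pdu[2]);
	put_le16(control, &pdu[4]);

	/* FCS covers the basic header and the control field */
	put_le16(crc16(0, pdu, 6), &pdu[6]);

	for (size_t i = 0; i < sizeof(pdu); i++)
		printf("%02x ", pdu[i]);
	printf("\n");
	return 0;
}
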
@@ -914,25 +945,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
914 945
915static void l2cap_chan_ready(struct l2cap_chan *chan) 946static void l2cap_chan_ready(struct l2cap_chan *chan)
916{ 947{
917 struct sock *sk = chan->sk; 948 /* This clears all conf flags, including CONF_NOT_COMPLETE */
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0; 949 chan->conf_state = 0;
927 __clear_chan_timer(chan); 950 __clear_chan_timer(chan);
928 951
929 __l2cap_state_change(chan, BT_CONNECTED); 952 chan->state = BT_CONNECTED;
930 sk->sk_state_change(sk);
931
932 if (parent)
933 parent->sk_data_ready(parent, 0);
934 953
935 release_sock(sk); 954 chan->ops->ready(chan);
936} 955}
937 956
938static void l2cap_do_start(struct l2cap_chan *chan) 957static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +972,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
953 l2cap_send_conn_req(chan); 972 l2cap_send_conn_req(chan);
954 } else { 973 } else {
955 struct l2cap_info_req req; 974 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 975 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
957 976
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn); 978 conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1014,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
995 __clear_ack_timer(chan); 1014 __clear_ack_timer(chan);
996 } 1015 }
997 1016
1017 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1018 __l2cap_state_change(chan, BT_DISCONN);
1019 return;
1020 }
1021
998 req.dcid = cpu_to_le16(chan->dcid); 1022 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid); 1023 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1024 l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1077,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1053 if (test_bit(BT_SK_DEFER_SETUP, 1077 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) { 1078 &bt_sk(sk)->flags)) {
1055 struct sock *parent = bt_sk(sk)->parent; 1079 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1080 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1081 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1058 if (parent) 1082 if (parent)
1059 parent->sk_data_ready(parent, 0); 1083 parent->sk_data_ready(parent, 0);
1060 1084
1061 } else { 1085 } else {
1062 __l2cap_state_change(chan, BT_CONFIG); 1086 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 1087 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 1088 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1065 } 1089 }
1066 release_sock(sk); 1090 release_sock(sk);
1067 } else { 1091 } else {
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1092 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1093 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1070 } 1094 }
1071 1095
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 1096 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1174,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1150 1174
1151 lock_sock(parent); 1175 lock_sock(parent);
1152 1176
1153 /* Check for backlog size */ 1177 chan = pchan->ops->new_connection(pchan);
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1156 goto clean;
1157 }
1158
1159 chan = pchan->ops->new_connection(pchan->data);
1160 if (!chan) 1178 if (!chan)
1161 goto clean; 1179 goto clean;
1162 1180
@@ -1171,10 +1189,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1171 1189
1172 l2cap_chan_add(conn, chan); 1190 l2cap_chan_add(conn, chan);
1173 1191
1174 __set_chan_timer(chan, sk->sk_sndtimeo); 1192 l2cap_chan_ready(chan);
1175
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1178 1193
1179clean: 1194clean:
1180 release_sock(parent); 1195 release_sock(parent);
@@ -1198,6 +1213,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
1198 1213
1199 l2cap_chan_lock(chan); 1214 l2cap_chan_lock(chan);
1200 1215
1216 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1217 l2cap_chan_unlock(chan);
1218 continue;
1219 }
1220
1201 if (conn->hcon->type == LE_LINK) { 1221 if (conn->hcon->type == LE_LINK) {
1202 if (smp_conn_security(conn, chan->sec_level)) 1222 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan); 1223 l2cap_chan_ready(chan);
@@ -1270,7 +1290,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1270 1290
1271 l2cap_chan_unlock(chan); 1291 l2cap_chan_unlock(chan);
1272 1292
1273 chan->ops->close(chan->data); 1293 chan->ops->close(chan);
1274 l2cap_chan_put(chan); 1294 l2cap_chan_put(chan);
1275 } 1295 }
1276 1296
@@ -1444,21 +1464,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1444 goto done; 1464 goto done;
1445 } 1465 }
1446 1466
1447 lock_sock(sk); 1467 switch (chan->state) {
1448
1449 switch (sk->sk_state) {
1450 case BT_CONNECT: 1468 case BT_CONNECT:
1451 case BT_CONNECT2: 1469 case BT_CONNECT2:
1452 case BT_CONFIG: 1470 case BT_CONFIG:
1453 /* Already connecting */ 1471 /* Already connecting */
1454 err = 0; 1472 err = 0;
1455 release_sock(sk);
1456 goto done; 1473 goto done;
1457 1474
1458 case BT_CONNECTED: 1475 case BT_CONNECTED:
1459 /* Already connected */ 1476 /* Already connected */
1460 err = -EISCONN; 1477 err = -EISCONN;
1461 release_sock(sk);
1462 goto done; 1478 goto done;
1463 1479
1464 case BT_OPEN: 1480 case BT_OPEN:
@@ -1468,13 +1484,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1468 1484
1469 default: 1485 default:
1470 err = -EBADFD; 1486 err = -EBADFD;
1471 release_sock(sk);
1472 goto done; 1487 goto done;
1473 } 1488 }
1474 1489
1475 /* Set destination address and psm */ 1490 /* Set destination address and psm */
1491 lock_sock(sk);
1476 bacpy(&bt_sk(sk)->dst, dst); 1492 bacpy(&bt_sk(sk)->dst, dst);
1477
1478 release_sock(sk); 1493 release_sock(sk);
1479 1494
1480 chan->psm = psm; 1495 chan->psm = psm;
@@ -1576,23 +1591,20 @@ int __l2cap_wait_ack(struct sock *sk)
1576static void l2cap_monitor_timeout(struct work_struct *work) 1591static void l2cap_monitor_timeout(struct work_struct *work)
1577{ 1592{
1578 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1593 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1579 monitor_timer.work); 1594 monitor_timer.work);
1580 1595
1581 BT_DBG("chan %p", chan); 1596 BT_DBG("chan %p", chan);
1582 1597
1583 l2cap_chan_lock(chan); 1598 l2cap_chan_lock(chan);
1584 1599
1585 if (chan->retry_count >= chan->remote_max_tx) { 1600 if (!chan->conn) {
1586 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1587 l2cap_chan_unlock(chan); 1601 l2cap_chan_unlock(chan);
1588 l2cap_chan_put(chan); 1602 l2cap_chan_put(chan);
1589 return; 1603 return;
1590 } 1604 }
1591 1605
1592 chan->retry_count++; 1606 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1593 __set_monitor_timer(chan);
1594 1607
1595 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1596 l2cap_chan_unlock(chan); 1608 l2cap_chan_unlock(chan);
1597 l2cap_chan_put(chan); 1609 l2cap_chan_put(chan);
1598} 1610}
@@ -1600,234 +1612,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1600static void l2cap_retrans_timeout(struct work_struct *work) 1612static void l2cap_retrans_timeout(struct work_struct *work)
1601{ 1613{
1602 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1614 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1603 retrans_timer.work); 1615 retrans_timer.work);
1604 1616
1605 BT_DBG("chan %p", chan); 1617 BT_DBG("chan %p", chan);
1606 1618
1607 l2cap_chan_lock(chan); 1619 l2cap_chan_lock(chan);
1608 1620
1609 chan->retry_count = 1; 1621 if (!chan->conn) {
1610 __set_monitor_timer(chan); 1622 l2cap_chan_unlock(chan);
1611 1623 l2cap_chan_put(chan);
1612 set_bit(CONN_WAIT_F, &chan->conn_state); 1624 return;
1613 1625 }
1614 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1615 1626
1627 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1616 l2cap_chan_unlock(chan); 1628 l2cap_chan_unlock(chan);
1617 l2cap_chan_put(chan); 1629 l2cap_chan_put(chan);
1618} 1630}
1619 1631
1620static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1632static void l2cap_streaming_send(struct l2cap_chan *chan,
1633 struct sk_buff_head *skbs)
1621{ 1634{
1622 struct sk_buff *skb; 1635 struct sk_buff *skb;
1636 struct l2cap_ctrl *control;
1623 1637
1624 while ((skb = skb_peek(&chan->tx_q)) && 1638 BT_DBG("chan %p, skbs %p", chan, skbs);
1625 chan->unacked_frames) {
1626 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1627 break;
1628 1639
1629 skb = skb_dequeue(&chan->tx_q); 1640 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1630 kfree_skb(skb);
1631 1641
1632 chan->unacked_frames--; 1642 while (!skb_queue_empty(&chan->tx_q)) {
1633 }
1634 1643
1635 if (!chan->unacked_frames) 1644 skb = skb_dequeue(&chan->tx_q);
1636 __clear_retrans_timer(chan);
1637}
1638 1645
1639static void l2cap_streaming_send(struct l2cap_chan *chan) 1646 bt_cb(skb)->control.retries = 1;
1640{ 1647 control = &bt_cb(skb)->control;
1641 struct sk_buff *skb; 1648
1642 u32 control; 1649 control->reqseq = 0;
1643 u16 fcs; 1650 control->txseq = chan->next_tx_seq;
1644 1651
1645 while ((skb = skb_dequeue(&chan->tx_q))) { 1652 __pack_control(chan, control, skb);
1646 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1647 control |= __set_txseq(chan, chan->next_tx_seq);
1648 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1649 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1650 1653
1651 if (chan->fcs == L2CAP_FCS_CRC16) { 1654 if (chan->fcs == L2CAP_FCS_CRC16) {
1652 fcs = crc16(0, (u8 *)skb->data, 1655 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1653 skb->len - L2CAP_FCS_SIZE); 1656 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1654 put_unaligned_le16(fcs,
1655 skb->data + skb->len - L2CAP_FCS_SIZE);
1656 } 1657 }
1657 1658
1658 l2cap_do_send(chan, skb); 1659 l2cap_do_send(chan, skb);
1659 1660
1661 BT_DBG("Sent txseq %u", control->txseq);
1662
1660 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1663 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1664 chan->frames_sent++;
1661 } 1665 }
1662} 1666}
1663 1667
1664static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) 1668static int l2cap_ertm_send(struct l2cap_chan *chan)
1665{ 1669{
1666 struct sk_buff *skb, *tx_skb; 1670 struct sk_buff *skb, *tx_skb;
1667 u16 fcs; 1671 struct l2cap_ctrl *control;
1668 u32 control; 1672 int sent = 0;
1669 1673
1670 skb = skb_peek(&chan->tx_q); 1674 BT_DBG("chan %p", chan);
1671 if (!skb)
1672 return;
1673 1675
1674 while (bt_cb(skb)->control.txseq != tx_seq) { 1676 if (chan->state != BT_CONNECTED)
1675 if (skb_queue_is_last(&chan->tx_q, skb)) 1677 return -ENOTCONN;
1676 return;
1677 1678
1678 skb = skb_queue_next(&chan->tx_q, skb); 1679 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1679 } 1680 return 0;
1680 1681
1681 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1682 while (chan->tx_send_head &&
1682 chan->remote_max_tx) { 1683 chan->unacked_frames < chan->remote_tx_win &&
1683 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1684 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1684 return; 1685
1685 } 1686 skb = chan->tx_send_head;
1686 1687
1687 tx_skb = skb_clone(skb, GFP_ATOMIC); 1688 bt_cb(skb)->control.retries = 1;
1688 bt_cb(skb)->control.retries++; 1689 control = &bt_cb(skb)->control;
1689 1690
1690 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1691 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1691 control &= __get_sar_mask(chan); 1692 control->final = 1;
1693
1694 control->reqseq = chan->buffer_seq;
1695 chan->last_acked_seq = chan->buffer_seq;
1696 control->txseq = chan->next_tx_seq;
1692 1697
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1698 __pack_control(chan, control, skb);
1694 control |= __set_ctrl_final(chan);
1695 1699
1696 control |= __set_reqseq(chan, chan->buffer_seq); 1700 if (chan->fcs == L2CAP_FCS_CRC16) {
1697 control |= __set_txseq(chan, tx_seq); 1701 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1702 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1703 }
1698 1704
1699 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1705 /* Clone after data has been modified. Data is assumed to be
1706 read-only (for locking purposes) on cloned sk_buffs.
1707 */
1708 tx_skb = skb_clone(skb, GFP_KERNEL);
1700 1709
1701 if (chan->fcs == L2CAP_FCS_CRC16) { 1710 if (!tx_skb)
1702 fcs = crc16(0, (u8 *)tx_skb->data, 1711 break;
1703 tx_skb->len - L2CAP_FCS_SIZE); 1712
1704 put_unaligned_le16(fcs, 1713 __set_retrans_timer(chan);
1705 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); 1714
1715 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1716 chan->unacked_frames++;
1717 chan->frames_sent++;
1718 sent++;
1719
1720 if (skb_queue_is_last(&chan->tx_q, skb))
1721 chan->tx_send_head = NULL;
1722 else
1723 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1724
1725 l2cap_do_send(chan, tx_skb);
1726 BT_DBG("Sent txseq %u", control->txseq);
1706 } 1727 }
1707 1728
1708 l2cap_do_send(chan, tx_skb); 1729 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1730 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1731
1732 return sent;
1709} 1733}
1710 1734
1711static int l2cap_ertm_send(struct l2cap_chan *chan) 1735static void l2cap_ertm_resend(struct l2cap_chan *chan)
1712{ 1736{
1713 struct sk_buff *skb, *tx_skb; 1737 struct l2cap_ctrl control;
1714 u16 fcs; 1738 struct sk_buff *skb;
1715 u32 control; 1739 struct sk_buff *tx_skb;
1716 int nsent = 0; 1740 u16 seq;
1717 1741
1718 if (chan->state != BT_CONNECTED) 1742 BT_DBG("chan %p", chan);
1719 return -ENOTCONN;
1720 1743
1721 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1744 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1722 return 0; 1745 return;
1723 1746
1724 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1747 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1748 seq = l2cap_seq_list_pop(&chan->retrans_list);
1725 1749
1726 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1750 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1727 chan->remote_max_tx) { 1751 if (!skb) {
1728 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1752 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1729 break; 1753 seq);
1754 continue;
1730 } 1755 }
1731 1756
1732 tx_skb = skb_clone(skb, GFP_ATOMIC);
1733
1734 bt_cb(skb)->control.retries++; 1757 bt_cb(skb)->control.retries++;
1758 control = bt_cb(skb)->control;
1735 1759
1736 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1760 if (chan->max_tx != 0 &&
1737 control &= __get_sar_mask(chan); 1761 bt_cb(skb)->control.retries > chan->max_tx) {
1762 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1764 l2cap_seq_list_clear(&chan->retrans_list);
1765 break;
1766 }
1738 1767
1768 control.reqseq = chan->buffer_seq;
1739 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1769 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1740 control |= __set_ctrl_final(chan); 1770 control.final = 1;
1771 else
1772 control.final = 0;
1741 1773
1742 control |= __set_reqseq(chan, chan->buffer_seq); 1774 if (skb_cloned(skb)) {
1743 control |= __set_txseq(chan, chan->next_tx_seq); 1775 /* Cloned sk_buffs are read-only, so we need a
1744 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1776 * writeable copy
1777 */
1778 tx_skb = skb_copy(skb, GFP_ATOMIC);
1779 } else {
1780 tx_skb = skb_clone(skb, GFP_ATOMIC);
1781 }
1745 1782
1746 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1783 if (!tx_skb) {
1784 l2cap_seq_list_clear(&chan->retrans_list);
1785 break;
1786 }
1787
1788 /* Update skb contents */
1789 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1790 put_unaligned_le32(__pack_extended_control(&control),
1791 tx_skb->data + L2CAP_HDR_SIZE);
1792 } else {
1793 put_unaligned_le16(__pack_enhanced_control(&control),
1794 tx_skb->data + L2CAP_HDR_SIZE);
1795 }
1747 1796
1748 if (chan->fcs == L2CAP_FCS_CRC16) { 1797 if (chan->fcs == L2CAP_FCS_CRC16) {
1749 fcs = crc16(0, (u8 *)skb->data, 1798 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1750 tx_skb->len - L2CAP_FCS_SIZE); 1799 put_unaligned_le16(fcs, skb_put(tx_skb,
1751 put_unaligned_le16(fcs, skb->data + 1800 L2CAP_FCS_SIZE));
1752 tx_skb->len - L2CAP_FCS_SIZE);
1753 } 1801 }
1754 1802
1755 l2cap_do_send(chan, tx_skb); 1803 l2cap_do_send(chan, tx_skb);
1756 1804
1757 __set_retrans_timer(chan); 1805 BT_DBG("Resent txseq %d", control.txseq);
1758
1759 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1760 1806
1761 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1807 chan->last_acked_seq = chan->buffer_seq;
1762
1763 if (bt_cb(skb)->control.retries == 1) {
1764 chan->unacked_frames++;
1765
1766 if (!nsent++)
1767 __clear_ack_timer(chan);
1768 }
1769
1770 chan->frames_sent++;
1771
1772 if (skb_queue_is_last(&chan->tx_q, skb))
1773 chan->tx_send_head = NULL;
1774 else
1775 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1776 } 1808 }
1777
1778 return nsent;
1779} 1809}
1780 1810
1781static int l2cap_retransmit_frames(struct l2cap_chan *chan) 1811static void l2cap_retransmit(struct l2cap_chan *chan,
1812 struct l2cap_ctrl *control)
1782{ 1813{
1783 int ret; 1814 BT_DBG("chan %p, control %p", chan, control);
1784 1815
1785 if (!skb_queue_empty(&chan->tx_q)) 1816 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1786 chan->tx_send_head = chan->tx_q.next; 1817 l2cap_ertm_resend(chan);
1787
1788 chan->next_tx_seq = chan->expected_ack_seq;
1789 ret = l2cap_ertm_send(chan);
1790 return ret;
1791} 1818}
1792 1819
1793static void __l2cap_send_ack(struct l2cap_chan *chan) 1820static void l2cap_retransmit_all(struct l2cap_chan *chan,
1821 struct l2cap_ctrl *control)
1794{ 1822{
1795 u32 control = 0; 1823 struct sk_buff *skb;
1796 1824
1797 control |= __set_reqseq(chan, chan->buffer_seq); 1825 BT_DBG("chan %p, control %p", chan, control);
1798 1826
1799 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1827 if (control->poll)
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 1828 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1801 set_bit(CONN_RNR_SENT, &chan->conn_state); 1829
1802 l2cap_send_sframe(chan, control); 1830 l2cap_seq_list_clear(&chan->retrans_list);
1803 return;
1804 }
1805 1831
1806 if (l2cap_ertm_send(chan) > 0) 1832 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1807 return; 1833 return;
1808 1834
1809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 1835 if (chan->unacked_frames) {
1810 l2cap_send_sframe(chan, control); 1836 skb_queue_walk(&chan->tx_q, skb) {
1837 if (bt_cb(skb)->control.txseq == control->reqseq ||
1838 skb == chan->tx_send_head)
1839 break;
1840 }
1841
1842 skb_queue_walk_from(&chan->tx_q, skb) {
1843 if (skb == chan->tx_send_head)
1844 break;
1845
1846 l2cap_seq_list_append(&chan->retrans_list,
1847 bt_cb(skb)->control.txseq);
1848 }
1849
1850 l2cap_ertm_resend(chan);
1851 }
1811} 1852}
1812 1853
1813static void l2cap_send_ack(struct l2cap_chan *chan) 1854static void l2cap_send_ack(struct l2cap_chan *chan)
1814{ 1855{
1815 __clear_ack_timer(chan); 1856 struct l2cap_ctrl control;
1816 __l2cap_send_ack(chan); 1857 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1817} 1858 chan->last_acked_seq);
1859 int threshold;
1818 1860
1819static void l2cap_send_srejtail(struct l2cap_chan *chan) 1861 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1820{ 1862 chan, chan->last_acked_seq, chan->buffer_seq);
1821 struct srej_list *tail;
1822 u32 control;
1823 1863
1824 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 1864 memset(&control, 0, sizeof(control));
1825 control |= __set_ctrl_final(chan); 1865 control.sframe = 1;
1826 1866
1827 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1828 control |= __set_reqseq(chan, tail->tx_seq); 1868 chan->rx_state == L2CAP_RX_STATE_RECV) {
1869 __clear_ack_timer(chan);
1870 control.super = L2CAP_SUPER_RNR;
1871 control.reqseq = chan->buffer_seq;
1872 l2cap_send_sframe(chan, &control);
1873 } else {
1874 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1875 l2cap_ertm_send(chan);
1876 /* If any i-frames were sent, they included an ack */
1877 if (chan->buffer_seq == chan->last_acked_seq)
1878 frames_to_ack = 0;
1879 }
1829 1880
1830 l2cap_send_sframe(chan, control); 1881 /* Ack now if the window is 3/4ths full.
1882 * Calculate without mul or div
1883 */
1884 threshold = chan->ack_win;
1885 threshold += threshold << 1;
1886 threshold >>= 2;
1887
1888 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1889 threshold);
1890
1891 if (frames_to_ack >= threshold) {
1892 __clear_ack_timer(chan);
1893 control.super = L2CAP_SUPER_RR;
1894 control.reqseq = chan->buffer_seq;
1895 l2cap_send_sframe(chan, &control);
1896 frames_to_ack = 0;
1897 }
1898
1899 if (frames_to_ack)
1900 __set_ack_timer(chan);
1901 }
1831} 1902}
1832 1903
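A short note on the shift arithmetic in l2cap_send_ack() above: the three statements threshold = ack_win; threshold += threshold << 1; threshold >>= 2 compute floor(3 * ack_win / 4) without a multiply or divide, so an acknowledgment is forced once roughly three quarters of the receive window is outstanding. The standalone sketch below only demonstrates that equivalence over a plausible window range; the 0x3fff upper bound (extended control field) is an assumption for illustration and ack_threshold() is not a kernel function.

#include <stdio.h>

/* Standalone sketch: the ack threshold is ((w + (w << 1)) >> 2), i.e.
 * floor(3 * w / 4), computed without mul/div as in l2cap_send_ack().
 */
static unsigned ack_threshold(unsigned win)
{
	unsigned threshold = win;

	threshold += threshold << 1;	/* threshold = 3 * win */
	threshold >>= 2;		/* threshold = (3 * win) / 4 */

	return threshold;
}

int main(void)
{
	unsigned w;

	for (w = 1; w <= 0x3fff; w++) {
		if (ack_threshold(w) != (3 * w) / 4) {
			printf("mismatch at w=%u\n", w);
			return 1;
		}
	}

	printf("shift form matches 3*w/4 for every tested window\n");
	return 0;
}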
1833static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, 1904static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1876,15 +1947,15 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1876} 1947}
1877 1948
1878static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, 1949static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1879 struct msghdr *msg, size_t len, 1950 struct msghdr *msg, size_t len,
1880 u32 priority) 1951 u32 priority)
1881{ 1952{
1882 struct l2cap_conn *conn = chan->conn; 1953 struct l2cap_conn *conn = chan->conn;
1883 struct sk_buff *skb; 1954 struct sk_buff *skb;
1884 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 1955 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1885 struct l2cap_hdr *lh; 1956 struct l2cap_hdr *lh;
1886 1957
1887 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority); 1958 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1888 1959
1889 count = min_t(unsigned int, (conn->mtu - hlen), len); 1960 count = min_t(unsigned int, (conn->mtu - hlen), len);
1890 1961
@@ -1910,15 +1981,15 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1910} 1981}
1911 1982
1912static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, 1983static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1913 struct msghdr *msg, size_t len, 1984 struct msghdr *msg, size_t len,
1914 u32 priority) 1985 u32 priority)
1915{ 1986{
1916 struct l2cap_conn *conn = chan->conn; 1987 struct l2cap_conn *conn = chan->conn;
1917 struct sk_buff *skb; 1988 struct sk_buff *skb;
1918 int err, count; 1989 int err, count;
1919 struct l2cap_hdr *lh; 1990 struct l2cap_hdr *lh;
1920 1991
1921 BT_DBG("chan %p len %d", chan, (int)len); 1992 BT_DBG("chan %p len %zu", chan, len);
1922 1993
1923 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); 1994 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1924 1995
@@ -1943,23 +2014,20 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1943} 2014}
1944 2015
1945static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 2016static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1946 struct msghdr *msg, size_t len, 2017 struct msghdr *msg, size_t len,
1947 u16 sdulen) 2018 u16 sdulen)
1948{ 2019{
1949 struct l2cap_conn *conn = chan->conn; 2020 struct l2cap_conn *conn = chan->conn;
1950 struct sk_buff *skb; 2021 struct sk_buff *skb;
1951 int err, count, hlen; 2022 int err, count, hlen;
1952 struct l2cap_hdr *lh; 2023 struct l2cap_hdr *lh;
1953 2024
1954 BT_DBG("chan %p len %d", chan, (int)len); 2025 BT_DBG("chan %p len %zu", chan, len);
1955 2026
1956 if (!conn) 2027 if (!conn)
1957 return ERR_PTR(-ENOTCONN); 2028 return ERR_PTR(-ENOTCONN);
1958 2029
1959 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2030 hlen = __ertm_hdr_size(chan);
1960 hlen = L2CAP_EXT_HDR_SIZE;
1961 else
1962 hlen = L2CAP_ENH_HDR_SIZE;
1963 2031
1964 if (sdulen) 2032 if (sdulen)
1965 hlen += L2CAP_SDULEN_SIZE; 2033 hlen += L2CAP_SDULEN_SIZE;
@@ -1979,7 +2047,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1979 lh->cid = cpu_to_le16(chan->dcid); 2047 lh->cid = cpu_to_le16(chan->dcid);
1980 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2048 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1981 2049
1982 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); 2050 /* Control header is populated later */
2051 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2052 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2053 else
2054 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1983 2055
1984 if (sdulen) 2056 if (sdulen)
1985 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2057 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1990,9 +2062,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1990 return ERR_PTR(err); 2062 return ERR_PTR(err);
1991 } 2063 }
1992 2064
1993 if (chan->fcs == L2CAP_FCS_CRC16) 2065 bt_cb(skb)->control.fcs = chan->fcs;
1994 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1995
1996 bt_cb(skb)->control.retries = 0; 2066 bt_cb(skb)->control.retries = 0;
1997 return skb; 2067 return skb;
1998} 2068}
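For reference, the header reserved by l2cap_create_iframe_pdu() above is the 4-byte basic L2CAP header plus a 2-byte enhanced or 4-byte extended control field (zero-filled here and patched in later), plus a 2-byte SDU length field on the first segment of a segmented SDU; the FCS is no longer reserved at PDU-creation time, since the patch records chan->fcs in the skb control block and appends the CRC when the frame is actually sent. The sketch below restates that size calculation with the standard L2CAP sizes written out as literals, which is an assumption for illustration; iframe_hlen() is not a kernel helper.

#include <stdbool.h>
#include <stdio.h>

/* Standalone sketch of the I-frame overhead behind __ertm_hdr_size():
 * basic header + enhanced/extended control field, plus the SDU length
 * field on the first segment of a segmented SDU.
 */
#define HDR_SIZE	4	/* length + CID */
#define ENH_CTRL_SIZE	2	/* enhanced control field */
#define EXT_CTRL_SIZE	4	/* extended control field */
#define SDULEN_SIZE	2	/* SDU length, SAR start only */

static int iframe_hlen(bool ext_ctrl, bool sar_start)
{
	int hlen = HDR_SIZE;

	hlen += ext_ctrl ? EXT_CTRL_SIZE : ENH_CTRL_SIZE;

	if (sar_start)
		hlen += SDULEN_SIZE;

	return hlen;
}

int main(void)
{
	printf("enhanced, unsegmented: %d bytes\n", iframe_hlen(false, false));
	printf("extended, SAR start:   %d bytes\n", iframe_hlen(true, true));
	return 0;
}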
@@ -2004,10 +2074,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2004 struct sk_buff *skb; 2074 struct sk_buff *skb;
2005 u16 sdu_len; 2075 u16 sdu_len;
2006 size_t pdu_len; 2076 size_t pdu_len;
2007 int err = 0;
2008 u8 sar; 2077 u8 sar;
2009 2078
2010 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); 2079 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2011 2080
2012 /* It is critical that ERTM PDUs fit in a single HCI fragment, 2081 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2013 * so fragmented skbs are not used. The HCI layer's handling 2082 * so fragmented skbs are not used. The HCI layer's handling
@@ -2020,7 +2089,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2020 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2089 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2021 2090
2022 /* Adjust for largest possible L2CAP overhead. */ 2091 /* Adjust for largest possible L2CAP overhead. */
2023 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; 2092 if (chan->fcs)
2093 pdu_len -= L2CAP_FCS_SIZE;
2094
2095 pdu_len -= __ertm_hdr_size(chan);
2024 2096
2025 /* Remote device may have requested smaller PDUs */ 2097 /* Remote device may have requested smaller PDUs */
2026 pdu_len = min_t(size_t, pdu_len, chan->remote_mps); 2098 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2060,7 +2132,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2060 } 2132 }
2061 } 2133 }
2062 2134
2063 return err; 2135 return 0;
2064} 2136}
2065 2137
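The overhead adjustment in l2cap_segment_sdu() above fixes the PDU payload budget: start from the ACL MTU, cap it at the BR/EDR single-fragment maximum, subtract the 2-byte FCS only when FCS is actually in use (the old code always reserved it), subtract the ERTM header, and finally clamp to the peer's MPS. The sketch below walks that arithmetic with the constants written as literals; the 1019-byte BR/EDR maximum is what the kernel macro is believed to expand to and is an assumption here, as is an MTU large enough that the subtractions do not underflow.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the ERTM payload budget from l2cap_segment_sdu(). */
#define BREDR_MAX_PAYLOAD	1019
#define FCS_SIZE		2

static size_t ertm_pdu_payload(size_t acl_mtu, size_t ertm_hdr,
			       int fcs_enabled, uint16_t remote_mps)
{
	size_t pdu_len = acl_mtu;

	if (pdu_len > BREDR_MAX_PAYLOAD)
		pdu_len = BREDR_MAX_PAYLOAD;	/* must fit one HCI fragment */

	if (fcs_enabled)
		pdu_len -= FCS_SIZE;		/* room for the CRC-16 */

	pdu_len -= ertm_hdr;			/* basic header + control field */

	if (pdu_len > remote_mps)
		pdu_len = remote_mps;		/* peer's maximum PDU size */

	return pdu_len;
}

int main(void)
{
	/* 1021-byte ACL MTU, extended control (8-byte header), FCS on, MPS 672 */
	printf("payload budget: %zu bytes\n",
	       ertm_pdu_payload(1021, 8, 1, 672));
	return 0;
}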
2066int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2138int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2122,17 +2194,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2122 if (err) 2194 if (err)
2123 break; 2195 break;
2124 2196
2125 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2126 chan->tx_send_head = seg_queue.next;
2127 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2128
2129 if (chan->mode == L2CAP_MODE_ERTM) 2197 if (chan->mode == L2CAP_MODE_ERTM)
2130 err = l2cap_ertm_send(chan); 2198 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2131 else 2199 else
2132 l2cap_streaming_send(chan); 2200 l2cap_streaming_send(chan, &seg_queue);
2133 2201
2134 if (err >= 0) 2202 err = len;
2135 err = len;
2136 2203
2137 /* If the skbs were not queued for sending, they'll still be in 2204 /* If the skbs were not queued for sending, they'll still be in
2138 * seg_queue and need to be purged. 2205 * seg_queue and need to be purged.
@@ -2148,6 +2215,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2148 return err; 2215 return err;
2149} 2216}
2150 2217
2218static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2219{
2220 struct l2cap_ctrl control;
2221 u16 seq;
2222
2223 BT_DBG("chan %p, txseq %u", chan, txseq);
2224
2225 memset(&control, 0, sizeof(control));
2226 control.sframe = 1;
2227 control.super = L2CAP_SUPER_SREJ;
2228
2229 for (seq = chan->expected_tx_seq; seq != txseq;
2230 seq = __next_seq(chan, seq)) {
2231 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2232 control.reqseq = seq;
2233 l2cap_send_sframe(chan, &control);
2234 l2cap_seq_list_append(&chan->srej_list, seq);
2235 }
2236 }
2237
2238 chan->expected_tx_seq = __next_seq(chan, txseq);
2239}
2240
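The SREJ walk above, like frames_to_ack in l2cap_send_ack() and the ack processing further down, relies on modular sequence arithmetic: __next_seq() wraps at the maximum sequence number for the negotiated control field, and __seq_offset() gives the forward distance between two sequence numbers with wrap-around handled. The sketch below reproduces that arithmetic in isolation; the 63/0x3fff maxima for the enhanced and extended control fields are quoted as an assumption and passed as plain parameters.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of ERTM sequence arithmetic: sequence numbers live
 * modulo (tx_win_max + 1), where tx_win_max is 63 (enhanced control
 * field) or 0x3fff (extended control field).
 */
static uint16_t next_seq(uint16_t seq, uint16_t tx_win_max)
{
	return (seq + 1) % (tx_win_max + 1);
}

static uint16_t seq_offset(uint16_t seq1, uint16_t seq2, uint16_t tx_win_max)
{
	/* forward distance from seq2 to seq1, handling wrap-around */
	if (seq1 >= seq2)
		return seq1 - seq2;

	return (tx_win_max + 1) - (seq2 - seq1);
}

int main(void)
{
	/* enhanced control field: sequence numbers wrap at 64 */
	printf("next_seq(63)  = %u\n", (unsigned) next_seq(63, 63));
	printf("offset(2, 60) = %u\n", (unsigned) seq_offset(2, 60, 63));
	return 0;
}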
2241static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2242{
2243 struct l2cap_ctrl control;
2244
2245 BT_DBG("chan %p", chan);
2246
2247 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2248 return;
2249
2250 memset(&control, 0, sizeof(control));
2251 control.sframe = 1;
2252 control.super = L2CAP_SUPER_SREJ;
2253 control.reqseq = chan->srej_list.tail;
2254 l2cap_send_sframe(chan, &control);
2255}
2256
2257static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2258{
2259 struct l2cap_ctrl control;
2260 u16 initial_head;
2261 u16 seq;
2262
2263 BT_DBG("chan %p, txseq %u", chan, txseq);
2264
2265 memset(&control, 0, sizeof(control));
2266 control.sframe = 1;
2267 control.super = L2CAP_SUPER_SREJ;
2268
2269 /* Capture initial list head to allow only one pass through the list. */
2270 initial_head = chan->srej_list.head;
2271
2272 do {
2273 seq = l2cap_seq_list_pop(&chan->srej_list);
2274 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2275 break;
2276
2277 control.reqseq = seq;
2278 l2cap_send_sframe(chan, &control);
2279 l2cap_seq_list_append(&chan->srej_list, seq);
2280 } while (chan->srej_list.head != initial_head);
2281}
2282
2283static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2284{
2285 struct sk_buff *acked_skb;
2286 u16 ackseq;
2287
2288 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2289
2290 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2291 return;
2292
2293 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2294 chan->expected_ack_seq, chan->unacked_frames);
2295
2296 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2297 ackseq = __next_seq(chan, ackseq)) {
2298
2299 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2300 if (acked_skb) {
2301 skb_unlink(acked_skb, &chan->tx_q);
2302 kfree_skb(acked_skb);
2303 chan->unacked_frames--;
2304 }
2305 }
2306
2307 chan->expected_ack_seq = reqseq;
2308
2309 if (chan->unacked_frames == 0)
2310 __clear_retrans_timer(chan);
2311
2312 BT_DBG("unacked_frames %u", chan->unacked_frames);
2313}
2314
2315static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2316{
2317 BT_DBG("chan %p", chan);
2318
2319 chan->expected_tx_seq = chan->buffer_seq;
2320 l2cap_seq_list_clear(&chan->srej_list);
2321 skb_queue_purge(&chan->srej_q);
2322 chan->rx_state = L2CAP_RX_STATE_RECV;
2323}
2324
2325static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2326 struct l2cap_ctrl *control,
2327 struct sk_buff_head *skbs, u8 event)
2328{
2329 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2330 event);
2331
2332 switch (event) {
2333 case L2CAP_EV_DATA_REQUEST:
2334 if (chan->tx_send_head == NULL)
2335 chan->tx_send_head = skb_peek(skbs);
2336
2337 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2338 l2cap_ertm_send(chan);
2339 break;
2340 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2341 BT_DBG("Enter LOCAL_BUSY");
2342 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2343
2344 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2345 /* The SREJ_SENT state must be aborted if we are to
2346 * enter the LOCAL_BUSY state.
2347 */
2348 l2cap_abort_rx_srej_sent(chan);
2349 }
2350
2351 l2cap_send_ack(chan);
2352
2353 break;
2354 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2355 BT_DBG("Exit LOCAL_BUSY");
2356 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2357
2358 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2359 struct l2cap_ctrl local_control;
2360
2361 memset(&local_control, 0, sizeof(local_control));
2362 local_control.sframe = 1;
2363 local_control.super = L2CAP_SUPER_RR;
2364 local_control.poll = 1;
2365 local_control.reqseq = chan->buffer_seq;
2366 l2cap_send_sframe(chan, &local_control);
2367
2368 chan->retry_count = 1;
2369 __set_monitor_timer(chan);
2370 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2371 }
2372 break;
2373 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2374 l2cap_process_reqseq(chan, control->reqseq);
2375 break;
2376 case L2CAP_EV_EXPLICIT_POLL:
2377 l2cap_send_rr_or_rnr(chan, 1);
2378 chan->retry_count = 1;
2379 __set_monitor_timer(chan);
2380 __clear_ack_timer(chan);
2381 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2382 break;
2383 case L2CAP_EV_RETRANS_TO:
2384 l2cap_send_rr_or_rnr(chan, 1);
2385 chan->retry_count = 1;
2386 __set_monitor_timer(chan);
2387 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2388 break;
2389 case L2CAP_EV_RECV_FBIT:
2390 /* Nothing to process */
2391 break;
2392 default:
2393 break;
2394 }
2395}
2396
2397static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2398 struct l2cap_ctrl *control,
2399 struct sk_buff_head *skbs, u8 event)
2400{
2401 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2402 event);
2403
2404 switch (event) {
2405 case L2CAP_EV_DATA_REQUEST:
2406 if (chan->tx_send_head == NULL)
2407 chan->tx_send_head = skb_peek(skbs);
2408 /* Queue data, but don't send. */
2409 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2410 break;
2411 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2412 BT_DBG("Enter LOCAL_BUSY");
2413 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2414
2415 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2416 /* The SREJ_SENT state must be aborted if we are to
2417 * enter the LOCAL_BUSY state.
2418 */
2419 l2cap_abort_rx_srej_sent(chan);
2420 }
2421
2422 l2cap_send_ack(chan);
2423
2424 break;
2425 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2426 BT_DBG("Exit LOCAL_BUSY");
2427 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2428
2429 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2430 struct l2cap_ctrl local_control;
2431 memset(&local_control, 0, sizeof(local_control));
2432 local_control.sframe = 1;
2433 local_control.super = L2CAP_SUPER_RR;
2434 local_control.poll = 1;
2435 local_control.reqseq = chan->buffer_seq;
2436 l2cap_send_sframe(chan, &local_control);
2437
2438 chan->retry_count = 1;
2439 __set_monitor_timer(chan);
2440 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2441 }
2442 break;
2443 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2444 l2cap_process_reqseq(chan, control->reqseq);
2445
2446 /* Fall through */
2447
2448 case L2CAP_EV_RECV_FBIT:
2449 if (control && control->final) {
2450 __clear_monitor_timer(chan);
2451 if (chan->unacked_frames > 0)
2452 __set_retrans_timer(chan);
2453 chan->retry_count = 0;
2454 chan->tx_state = L2CAP_TX_STATE_XMIT;
 2455 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2456 }
2457 break;
2458 case L2CAP_EV_EXPLICIT_POLL:
2459 /* Ignore */
2460 break;
2461 case L2CAP_EV_MONITOR_TO:
2462 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2463 l2cap_send_rr_or_rnr(chan, 1);
2464 __set_monitor_timer(chan);
2465 chan->retry_count++;
2466 } else {
2467 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2468 }
2469 break;
2470 default:
2471 break;
2472 }
2473}
2474
2475static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2476 struct sk_buff_head *skbs, u8 event)
2477{
2478 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2479 chan, control, skbs, event, chan->tx_state);
2480
2481 switch (chan->tx_state) {
2482 case L2CAP_TX_STATE_XMIT:
2483 l2cap_tx_state_xmit(chan, control, skbs, event);
2484 break;
2485 case L2CAP_TX_STATE_WAIT_F:
2486 l2cap_tx_state_wait_f(chan, control, skbs, event);
2487 break;
2488 default:
2489 /* Ignore event */
2490 break;
2491 }
2492}
2493
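l2cap_tx() above is the single entry point to the new transmit state machine: data requests, local-busy transitions, received acknowledgments and poll/final bits all arrive as events, and their effect depends on whether the channel is in XMIT (normal transmission) or WAIT_F (a poll is outstanding and the peer must answer with F=1). The sketch below reduces the dispatch to the transitions visible in this patch; the enum names are shortened stand-ins, not the kernel's identifiers.

#include <stdio.h>

/* Reduced standalone sketch of the ERTM transmit state machine driven by
 * l2cap_tx().  Only the state transitions shown in this patch are
 * modelled; timers, busy handling and error paths are elided.
 */
enum tx_state { TX_XMIT, TX_WAIT_F };
enum tx_event { EV_DATA_REQUEST, EV_EXPLICIT_POLL, EV_RETRANS_TO,
		EV_RECV_FBIT, EV_MONITOR_TO };

static enum tx_state tx_step(enum tx_state state, enum tx_event event,
			     int final_bit)
{
	switch (state) {
	case TX_XMIT:
		if (event == EV_EXPLICIT_POLL || event == EV_RETRANS_TO)
			return TX_WAIT_F;	/* poll sent, wait for F=1 */
		return TX_XMIT;			/* data requests are sent now */
	case TX_WAIT_F:
		if (event == EV_RECV_FBIT && final_bit)
			return TX_XMIT;		/* poll answered */
		return TX_WAIT_F;		/* data is only queued; monitor
						 * timeouts re-poll the peer */
	}

	return state;
}

int main(void)
{
	enum tx_state s = TX_XMIT;

	s = tx_step(s, EV_RETRANS_TO, 0);	/* retransmission timeout */
	s = tx_step(s, EV_DATA_REQUEST, 0);	/* queued, not sent */
	s = tx_step(s, EV_RECV_FBIT, 1);	/* final bit clears the poll */
	printf("back in XMIT: %s\n", s == TX_XMIT ? "yes" : "no");
	return 0;
}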
2494static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2495 struct l2cap_ctrl *control)
2496{
2497 BT_DBG("chan %p, control %p", chan, control);
2498 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2499}
2500
2501static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2502 struct l2cap_ctrl *control)
2503{
2504 BT_DBG("chan %p, control %p", chan, control);
2505 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2506}
2507
2151/* Copy frame to all raw sockets on that connection */ 2508/* Copy frame to all raw sockets on that connection */
2152static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 2509static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2153{ 2510{
@@ -2170,7 +2527,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2170 if (!nskb) 2527 if (!nskb)
2171 continue; 2528 continue;
2172 2529
2173 if (chan->ops->recv(chan->data, nskb)) 2530 if (chan->ops->recv(chan, nskb))
2174 kfree_skb(nskb); 2531 kfree_skb(nskb);
2175 } 2532 }
2176 2533
@@ -2178,16 +2535,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2178} 2535}
2179 2536
2180/* ---- L2CAP signalling commands ---- */ 2537/* ---- L2CAP signalling commands ---- */
2181static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 2538static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2182 u8 code, u8 ident, u16 dlen, void *data) 2539 u8 ident, u16 dlen, void *data)
2183{ 2540{
2184 struct sk_buff *skb, **frag; 2541 struct sk_buff *skb, **frag;
2185 struct l2cap_cmd_hdr *cmd; 2542 struct l2cap_cmd_hdr *cmd;
2186 struct l2cap_hdr *lh; 2543 struct l2cap_hdr *lh;
2187 int len, count; 2544 int len, count;
2188 2545
2189 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", 2546 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2190 conn, code, ident, dlen); 2547 conn, code, ident, dlen);
2191 2548
2192 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2549 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2193 count = min_t(unsigned int, conn->mtu, len); 2550 count = min_t(unsigned int, conn->mtu, len);
@@ -2200,9 +2557,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2200 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 2557 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2201 2558
2202 if (conn->hcon->type == LE_LINK) 2559 if (conn->hcon->type == LE_LINK)
2203 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 2560 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2204 else 2561 else
2205 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 2562 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2206 2563
2207 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 2564 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2208 cmd->code = code; 2565 cmd->code = code;
@@ -2270,7 +2627,7 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
2270 break; 2627 break;
2271 } 2628 }
2272 2629
2273 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val); 2630 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2274 return len; 2631 return len;
2275} 2632}
2276 2633
@@ -2278,7 +2635,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2278{ 2635{
2279 struct l2cap_conf_opt *opt = *ptr; 2636 struct l2cap_conf_opt *opt = *ptr;
2280 2637
2281 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val); 2638 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2282 2639
2283 opt->type = type; 2640 opt->type = type;
2284 opt->len = len; 2641 opt->len = len;
@@ -2314,8 +2671,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2314 efs.stype = chan->local_stype; 2671 efs.stype = chan->local_stype;
2315 efs.msdu = cpu_to_le16(chan->local_msdu); 2672 efs.msdu = cpu_to_le16(chan->local_msdu);
2316 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 2673 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2317 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 2674 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2318 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 2675 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2319 break; 2676 break;
2320 2677
2321 case L2CAP_MODE_STREAMING: 2678 case L2CAP_MODE_STREAMING:
@@ -2338,20 +2695,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2338static void l2cap_ack_timeout(struct work_struct *work) 2695static void l2cap_ack_timeout(struct work_struct *work)
2339{ 2696{
2340 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 2697 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2341 ack_timer.work); 2698 ack_timer.work);
2699 u16 frames_to_ack;
2342 2700
2343 BT_DBG("chan %p", chan); 2701 BT_DBG("chan %p", chan);
2344 2702
2345 l2cap_chan_lock(chan); 2703 l2cap_chan_lock(chan);
2346 2704
2347 __l2cap_send_ack(chan); 2705 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2706 chan->last_acked_seq);
2348 2707
2349 l2cap_chan_unlock(chan); 2708 if (frames_to_ack)
2709 l2cap_send_rr_or_rnr(chan, 0);
2350 2710
2711 l2cap_chan_unlock(chan);
2351 l2cap_chan_put(chan); 2712 l2cap_chan_put(chan);
2352} 2713}
2353 2714
2354static inline int l2cap_ertm_init(struct l2cap_chan *chan) 2715int l2cap_ertm_init(struct l2cap_chan *chan)
2355{ 2716{
2356 int err; 2717 int err;
2357 2718
@@ -2360,7 +2721,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2360 chan->expected_ack_seq = 0; 2721 chan->expected_ack_seq = 0;
2361 chan->unacked_frames = 0; 2722 chan->unacked_frames = 0;
2362 chan->buffer_seq = 0; 2723 chan->buffer_seq = 0;
2363 chan->num_acked = 0;
2364 chan->frames_sent = 0; 2724 chan->frames_sent = 0;
2365 chan->last_acked_seq = 0; 2725 chan->last_acked_seq = 0;
2366 chan->sdu = NULL; 2726 chan->sdu = NULL;
@@ -2381,12 +2741,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2381 2741
2382 skb_queue_head_init(&chan->srej_q); 2742 skb_queue_head_init(&chan->srej_q);
2383 2743
2384 INIT_LIST_HEAD(&chan->srej_l);
2385 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); 2744 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2386 if (err < 0) 2745 if (err < 0)
2387 return err; 2746 return err;
2388 2747
2389 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); 2748 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2749 if (err < 0)
2750 l2cap_seq_list_free(&chan->srej_list);
2751
2752 return err;
2390} 2753}
2391 2754
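One detail worth calling out in the l2cap_ertm_init() change above: if allocating the retransmission list fails after the SREJ list was already set up, the error path now frees the SREJ list, so a channel is never left half-initialized. A minimal standalone sketch of that init-with-rollback pattern follows; all names in it are illustrative, not kernel symbols.

#include <stdlib.h>

/* Standalone sketch of the rollback pattern used by l2cap_ertm_init():
 * undo the first allocation when the second one fails.
 */
struct seq_list {
	unsigned short *list;
	size_t size;
};

static int seq_list_init(struct seq_list *l, size_t size)
{
	l->list = calloc(size, sizeof(*l->list));
	if (!l->list)
		return -1;

	l->size = size;
	return 0;
}

static void seq_list_free(struct seq_list *l)
{
	free(l->list);
	l->list = NULL;
	l->size = 0;
}

static int ertm_lists_init(struct seq_list *srej, struct seq_list *retrans,
			   size_t tx_win, size_t remote_tx_win)
{
	int err;

	err = seq_list_init(srej, tx_win);
	if (err < 0)
		return err;

	err = seq_list_init(retrans, remote_tx_win);
	if (err < 0)
		seq_list_free(srej);	/* roll back the first allocation */

	return err;
}

int main(void)
{
	struct seq_list srej, retrans;

	return ertm_lists_init(&srej, &retrans, 64, 64) ? 1 : 0;
}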
2392static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2755static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2424,6 +2787,7 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2424 L2CAP_DEFAULT_TX_WINDOW); 2787 L2CAP_DEFAULT_TX_WINDOW);
2425 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 2788 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2426 } 2789 }
2790 chan->ack_win = chan->tx_win;
2427} 2791}
2428 2792
2429static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) 2793static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
@@ -2512,6 +2876,7 @@ done:
2512 break; 2876 break;
2513 2877
2514 case L2CAP_MODE_STREAMING: 2878 case L2CAP_MODE_STREAMING:
2879 l2cap_txwin_setup(chan);
2515 rfc.mode = L2CAP_MODE_STREAMING; 2880 rfc.mode = L2CAP_MODE_STREAMING;
2516 rfc.txwin_size = 0; 2881 rfc.txwin_size = 0;
2517 rfc.max_transmit = 0; 2882 rfc.max_transmit = 0;
@@ -2542,7 +2907,7 @@ done:
2542 } 2907 }
2543 2908
2544 req->dcid = cpu_to_le16(chan->dcid); 2909 req->dcid = cpu_to_le16(chan->dcid);
2545 req->flags = cpu_to_le16(0); 2910 req->flags = __constant_cpu_to_le16(0);
2546 2911
2547 return ptr - data; 2912 return ptr - data;
2548} 2913}
@@ -2762,7 +3127,7 @@ done:
2762 } 3127 }
2763 rsp->scid = cpu_to_le16(chan->dcid); 3128 rsp->scid = cpu_to_le16(chan->dcid);
2764 rsp->result = cpu_to_le16(result); 3129 rsp->result = cpu_to_le16(result);
2765 rsp->flags = cpu_to_le16(0x0000); 3130 rsp->flags = __constant_cpu_to_le16(0);
2766 3131
2767 return ptr - data; 3132 return ptr - data;
2768} 3133}
@@ -2812,10 +3177,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2812 break; 3177 break;
2813 3178
2814 case L2CAP_CONF_EWS: 3179 case L2CAP_CONF_EWS:
2815 chan->tx_win = min_t(u16, val, 3180 chan->ack_win = min_t(u16, val, chan->ack_win);
2816 L2CAP_DEFAULT_EXT_WINDOW);
2817 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2818 chan->tx_win); 3182 chan->tx_win);
2819 break; 3183 break;
2820 3184
2821 case L2CAP_CONF_EFS: 3185 case L2CAP_CONF_EFS:
@@ -2844,6 +3208,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2844 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3208 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2845 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3209 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2846 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3210 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3211 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3212 chan->ack_win = min_t(u16, chan->ack_win,
3213 rfc.txwin_size);
2847 3214
2848 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3215 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2849 chan->local_msdu = le16_to_cpu(efs.msdu); 3216 chan->local_msdu = le16_to_cpu(efs.msdu);
@@ -2861,7 +3228,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2861 } 3228 }
2862 3229
2863 req->dcid = cpu_to_le16(chan->dcid); 3230 req->dcid = cpu_to_le16(chan->dcid);
2864 req->flags = cpu_to_le16(0x0000); 3231 req->flags = __constant_cpu_to_le16(0);
2865 3232
2866 return ptr - data; 3233 return ptr - data;
2867} 3234}
@@ -2888,8 +3255,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2888 3255
2889 rsp.scid = cpu_to_le16(chan->dcid); 3256 rsp.scid = cpu_to_le16(chan->dcid);
2890 rsp.dcid = cpu_to_le16(chan->scid); 3257 rsp.dcid = cpu_to_le16(chan->scid);
2891 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3258 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
2892 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3259 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
2893 l2cap_send_cmd(conn, chan->ident, 3260 l2cap_send_cmd(conn, chan->ident,
2894 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3261 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2895 3262
@@ -2905,7 +3272,17 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2905{ 3272{
2906 int type, olen; 3273 int type, olen;
2907 unsigned long val; 3274 unsigned long val;
2908 struct l2cap_conf_rfc rfc; 3275 /* Use sane default values in case a misbehaving remote device
3276 * did not send an RFC or extended window size option.
3277 */
3278 u16 txwin_ext = chan->ack_win;
3279 struct l2cap_conf_rfc rfc = {
3280 .mode = chan->mode,
3281 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3282 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3283 .max_pdu_size = cpu_to_le16(chan->imtu),
3284 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3285 };
2909 3286
2910 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); 3287 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2911 3288
@@ -2915,32 +3292,27 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2915 while (len >= L2CAP_CONF_OPT_SIZE) { 3292 while (len >= L2CAP_CONF_OPT_SIZE) {
2916 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 3293 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2917 3294
2918 if (type != L2CAP_CONF_RFC) 3295 switch (type) {
2919 continue; 3296 case L2CAP_CONF_RFC:
2920 3297 if (olen == sizeof(rfc))
2921 if (olen != sizeof(rfc)) 3298 memcpy(&rfc, (void *)val, olen);
2922 break; 3299 break;
2923 3300 case L2CAP_CONF_EWS:
2924 memcpy(&rfc, (void *)val, olen); 3301 txwin_ext = val;
2925 goto done; 3302 break;
3303 }
2926 } 3304 }
2927 3305
2928 /* Use sane default values in case a misbehaving remote device
2929 * did not send an RFC option.
2930 */
2931 rfc.mode = chan->mode;
2932 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2933 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2934 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2935
2936 BT_ERR("Expected RFC option was not found, using defaults");
2937
2938done:
2939 switch (rfc.mode) { 3306 switch (rfc.mode) {
2940 case L2CAP_MODE_ERTM: 3307 case L2CAP_MODE_ERTM:
2941 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 3308 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2942 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 3309 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2943 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3310 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3311 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3312 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3313 else
3314 chan->ack_win = min_t(u16, chan->ack_win,
3315 rfc.txwin_size);
2944 break; 3316 break;
2945 case L2CAP_MODE_STREAMING: 3317 case L2CAP_MODE_STREAMING:
2946 chan->mps = le16_to_cpu(rfc.max_pdu_size); 3318 chan->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2993,7 +3365,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2993 lock_sock(parent); 3365 lock_sock(parent);
2994 3366
2995 /* Check if the ACL is secure enough (if not SDP) */ 3367 /* Check if the ACL is secure enough (if not SDP) */
2996 if (psm != cpu_to_le16(0x0001) && 3368 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
2997 !hci_conn_check_link_mode(conn->hcon)) { 3369 !hci_conn_check_link_mode(conn->hcon)) {
2998 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3370 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2999 result = L2CAP_CR_SEC_BLOCK; 3371 result = L2CAP_CR_SEC_BLOCK;
@@ -3002,25 +3374,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
3002 3374
3003 result = L2CAP_CR_NO_MEM; 3375 result = L2CAP_CR_NO_MEM;
3004 3376
3005 /* Check for backlog size */ 3377 /* Check if we already have a channel with that dcid */
3006 if (sk_acceptq_is_full(parent)) { 3378 if (__l2cap_get_chan_by_dcid(conn, scid))
3007 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3008 goto response; 3379 goto response;
3009 }
3010 3380
3011 chan = pchan->ops->new_connection(pchan->data); 3381 chan = pchan->ops->new_connection(pchan);
3012 if (!chan) 3382 if (!chan)
3013 goto response; 3383 goto response;
3014 3384
3015 sk = chan->sk; 3385 sk = chan->sk;
3016 3386
3017 /* Check if we already have channel with that dcid */
3018 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3019 sock_set_flag(sk, SOCK_ZAPPED);
3020 chan->ops->close(chan->data);
3021 goto response;
3022 }
3023
3024 hci_conn_hold(conn->hcon); 3387 hci_conn_hold(conn->hcon);
3025 3388
3026 bacpy(&bt_sk(sk)->src, conn->src); 3389 bacpy(&bt_sk(sk)->src, conn->src);
@@ -3074,7 +3437,7 @@ sendresp:
3074 3437
3075 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3438 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3076 struct l2cap_info_req info; 3439 struct l2cap_info_req info;
3077 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3440 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3078 3441
3079 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3442 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3080 conn->info_ident = l2cap_get_ident(conn); 3443 conn->info_ident = l2cap_get_ident(conn);
@@ -3196,7 +3559,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3196 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3559 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3197 struct l2cap_cmd_rej_cid rej; 3560 struct l2cap_cmd_rej_cid rej;
3198 3561
3199 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 3562 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3200 rej.scid = cpu_to_le16(chan->scid); 3563 rej.scid = cpu_to_le16(chan->scid);
3201 rej.dcid = cpu_to_le16(chan->dcid); 3564 rej.dcid = cpu_to_le16(chan->dcid);
3202 3565
@@ -3218,11 +3581,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3218 memcpy(chan->conf_req + chan->conf_len, req->data, len); 3581 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3219 chan->conf_len += len; 3582 chan->conf_len += len;
3220 3583
3221 if (flags & 0x0001) { 3584 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3222 /* Incomplete config. Send empty response. */ 3585 /* Incomplete config. Send empty response. */
3223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3586 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3224 l2cap_build_conf_rsp(chan, rsp, 3587 l2cap_build_conf_rsp(chan, rsp,
3225 L2CAP_CONF_SUCCESS, 0x0001), rsp); 3588 L2CAP_CONF_SUCCESS, flags), rsp);
3226 goto unlock; 3589 goto unlock;
3227 } 3590 }
3228 3591
@@ -3245,8 +3608,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3245 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 3608 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3246 set_default_fcs(chan); 3609 set_default_fcs(chan);
3247 3610
3248 l2cap_state_change(chan, BT_CONNECTED);
3249
3250 if (chan->mode == L2CAP_MODE_ERTM || 3611 if (chan->mode == L2CAP_MODE_ERTM ||
3251 chan->mode == L2CAP_MODE_STREAMING) 3612 chan->mode == L2CAP_MODE_STREAMING)
3252 err = l2cap_ertm_init(chan); 3613 err = l2cap_ertm_init(chan);
@@ -3278,7 +3639,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3278 3639
3279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3640 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3280 l2cap_build_conf_rsp(chan, rsp, 3641 l2cap_build_conf_rsp(chan, rsp,
3281 L2CAP_CONF_SUCCESS, 0x0000), rsp); 3642 L2CAP_CONF_SUCCESS, flags), rsp);
3282 } 3643 }
3283 3644
3284unlock: 3645unlock:
@@ -3369,7 +3730,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3369 goto done; 3730 goto done;
3370 } 3731 }
3371 3732
3372 if (flags & 0x01) 3733 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3373 goto done; 3734 goto done;
3374 3735
3375 set_bit(CONF_INPUT_DONE, &chan->conf_state); 3736 set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3377,7 +3738,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3377 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 3738 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3378 set_default_fcs(chan); 3739 set_default_fcs(chan);
3379 3740
3380 l2cap_state_change(chan, BT_CONNECTED);
3381 if (chan->mode == L2CAP_MODE_ERTM || 3741 if (chan->mode == L2CAP_MODE_ERTM ||
3382 chan->mode == L2CAP_MODE_STREAMING) 3742 chan->mode == L2CAP_MODE_STREAMING)
3383 err = l2cap_ertm_init(chan); 3743 err = l2cap_ertm_init(chan);
@@ -3431,7 +3791,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3431 3791
3432 l2cap_chan_unlock(chan); 3792 l2cap_chan_unlock(chan);
3433 3793
3434 chan->ops->close(chan->data); 3794 chan->ops->close(chan);
3435 l2cap_chan_put(chan); 3795 l2cap_chan_put(chan);
3436 3796
3437 mutex_unlock(&conn->chan_lock); 3797 mutex_unlock(&conn->chan_lock);
@@ -3465,7 +3825,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3465 3825
3466 l2cap_chan_unlock(chan); 3826 l2cap_chan_unlock(chan);
3467 3827
3468 chan->ops->close(chan->data); 3828 chan->ops->close(chan);
3469 l2cap_chan_put(chan); 3829 l2cap_chan_put(chan);
3470 3830
3471 mutex_unlock(&conn->chan_lock); 3831 mutex_unlock(&conn->chan_lock);
@@ -3486,8 +3846,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3486 u8 buf[8]; 3846 u8 buf[8];
3487 u32 feat_mask = l2cap_feat_mask; 3847 u32 feat_mask = l2cap_feat_mask;
3488 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3848 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3489 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3849 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3490 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3850 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3491 if (!disable_ertm) 3851 if (!disable_ertm)
3492 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3852 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3493 | L2CAP_FEAT_FCS; 3853 | L2CAP_FEAT_FCS;
@@ -3507,15 +3867,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3507 else 3867 else
3508 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3868 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3509 3869
3510 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3870 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3871 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3512 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3872 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3513 l2cap_send_cmd(conn, cmd->ident, 3873 l2cap_send_cmd(conn, cmd->ident,
3514 L2CAP_INFO_RSP, sizeof(buf), buf); 3874 L2CAP_INFO_RSP, sizeof(buf), buf);
3515 } else { 3875 } else {
3516 struct l2cap_info_rsp rsp; 3876 struct l2cap_info_rsp rsp;
3517 rsp.type = cpu_to_le16(type); 3877 rsp.type = cpu_to_le16(type);
3518 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 3878 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3519 l2cap_send_cmd(conn, cmd->ident, 3879 l2cap_send_cmd(conn, cmd->ident,
3520 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3880 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3521 } 3881 }
@@ -3555,7 +3915,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3555 3915
3556 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3916 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3557 struct l2cap_info_req req; 3917 struct l2cap_info_req req;
3558 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3918 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3559 3919
3560 conn->info_ident = l2cap_get_ident(conn); 3920 conn->info_ident = l2cap_get_ident(conn);
3561 3921
@@ -3598,7 +3958,7 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3598 psm = le16_to_cpu(req->psm); 3958 psm = le16_to_cpu(req->psm);
3599 scid = le16_to_cpu(req->scid); 3959 scid = le16_to_cpu(req->scid);
3600 3960
3601 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); 3961 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3602 3962
3603 /* Placeholder: Always reject */ 3963 /* Placeholder: Always reject */
3604 rsp.dcid = 0; 3964 rsp.dcid = 0;
@@ -3621,11 +3981,11 @@ static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3621} 3981}
3622 3982
3623static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, 3983static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3624 u16 icid, u16 result) 3984 u16 icid, u16 result)
3625{ 3985{
3626 struct l2cap_move_chan_rsp rsp; 3986 struct l2cap_move_chan_rsp rsp;
3627 3987
3628 BT_DBG("icid %d, result %d", icid, result); 3988 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3629 3989
3630 rsp.icid = cpu_to_le16(icid); 3990 rsp.icid = cpu_to_le16(icid);
3631 rsp.result = cpu_to_le16(result); 3991 rsp.result = cpu_to_le16(result);
@@ -3634,12 +3994,13 @@ static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3634} 3994}
3635 3995
3636static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, 3996static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3637 struct l2cap_chan *chan, u16 icid, u16 result) 3997 struct l2cap_chan *chan,
3998 u16 icid, u16 result)
3638{ 3999{
3639 struct l2cap_move_chan_cfm cfm; 4000 struct l2cap_move_chan_cfm cfm;
3640 u8 ident; 4001 u8 ident;
3641 4002
3642 BT_DBG("icid %d, result %d", icid, result); 4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3643 4004
3644 ident = l2cap_get_ident(conn); 4005 ident = l2cap_get_ident(conn);
3645 if (chan) 4006 if (chan)
@@ -3652,18 +4013,19 @@ static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3652} 4013}
3653 4014
3654static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, 4015static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3655 u16 icid) 4016 u16 icid)
3656{ 4017{
3657 struct l2cap_move_chan_cfm_rsp rsp; 4018 struct l2cap_move_chan_cfm_rsp rsp;
3658 4019
3659 BT_DBG("icid %d", icid); 4020 BT_DBG("icid 0x%4.4x", icid);
3660 4021
3661 rsp.icid = cpu_to_le16(icid); 4022 rsp.icid = cpu_to_le16(icid);
3662 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); 4023 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3663} 4024}
3664 4025
3665static inline int l2cap_move_channel_req(struct l2cap_conn *conn, 4026static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3666 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4027 struct l2cap_cmd_hdr *cmd,
4028 u16 cmd_len, void *data)
3667{ 4029{
3668 struct l2cap_move_chan_req *req = data; 4030 struct l2cap_move_chan_req *req = data;
3669 u16 icid = 0; 4031 u16 icid = 0;
@@ -3674,7 +4036,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3674 4036
3675 icid = le16_to_cpu(req->icid); 4037 icid = le16_to_cpu(req->icid);
3676 4038
3677 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); 4039 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
3678 4040
3679 if (!enable_hs) 4041 if (!enable_hs)
3680 return -EINVAL; 4042 return -EINVAL;
@@ -3686,7 +4048,8 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3686} 4048}
3687 4049
3688static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, 4050static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3689 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4051 struct l2cap_cmd_hdr *cmd,
4052 u16 cmd_len, void *data)
3690{ 4053{
3691 struct l2cap_move_chan_rsp *rsp = data; 4054 struct l2cap_move_chan_rsp *rsp = data;
3692 u16 icid, result; 4055 u16 icid, result;
@@ -3697,7 +4060,7 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3697 icid = le16_to_cpu(rsp->icid); 4060 icid = le16_to_cpu(rsp->icid);
3698 result = le16_to_cpu(rsp->result); 4061 result = le16_to_cpu(rsp->result);
3699 4062
3700 BT_DBG("icid %d, result %d", icid, result); 4063 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3701 4064
3702 /* Placeholder: Always unconfirmed */ 4065 /* Placeholder: Always unconfirmed */
3703 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); 4066 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
@@ -3706,7 +4069,8 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3706} 4069}
3707 4070
3708static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, 4071static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3709 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4072 struct l2cap_cmd_hdr *cmd,
4073 u16 cmd_len, void *data)
3710{ 4074{
3711 struct l2cap_move_chan_cfm *cfm = data; 4075 struct l2cap_move_chan_cfm *cfm = data;
3712 u16 icid, result; 4076 u16 icid, result;
@@ -3717,7 +4081,7 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3717 icid = le16_to_cpu(cfm->icid); 4081 icid = le16_to_cpu(cfm->icid);
3718 result = le16_to_cpu(cfm->result); 4082 result = le16_to_cpu(cfm->result);
3719 4083
3720 BT_DBG("icid %d, result %d", icid, result); 4084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3721 4085
3722 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 4086 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3723 4087
@@ -3725,7 +4089,8 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3725} 4089}
3726 4090
3727static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, 4091static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) 4092 struct l2cap_cmd_hdr *cmd,
4093 u16 cmd_len, void *data)
3729{ 4094{
3730 struct l2cap_move_chan_cfm_rsp *rsp = data; 4095 struct l2cap_move_chan_cfm_rsp *rsp = data;
3731 u16 icid; 4096 u16 icid;
@@ -3735,7 +4100,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3735 4100
3736 icid = le16_to_cpu(rsp->icid); 4101 icid = le16_to_cpu(rsp->icid);
3737 4102
3738 BT_DBG("icid %d", icid); 4103 BT_DBG("icid 0x%4.4x", icid);
3739 4104
3740 return 0; 4105 return 0;
3741} 4106}
@@ -3790,9 +4155,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3790 4155
3791 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 4156 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3792 if (err) 4157 if (err)
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 4158 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3794 else 4159 else
3795 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 4160 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3796 4161
3797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 4162 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3798 sizeof(rsp), &rsp); 4163 sizeof(rsp), &rsp);
@@ -3940,7 +4305,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3940 BT_ERR("Wrong link type (%d)", err); 4305 BT_ERR("Wrong link type (%d)", err);
3941 4306
3942 /* FIXME: Map err to a valid reason */ 4307 /* FIXME: Map err to a valid reason */
3943 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 4308 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3944 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4309 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3945 } 4310 }
3946 4311
@@ -3972,65 +4337,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3972 return 0; 4337 return 0;
3973} 4338}
3974 4339
3975static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 4340static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3976{ 4341{
3977 u32 control = 0; 4342 struct l2cap_ctrl control;
3978 4343
3979 chan->frames_sent = 0; 4344 BT_DBG("chan %p", chan);
3980 4345
3981 control |= __set_reqseq(chan, chan->buffer_seq); 4346 memset(&control, 0, sizeof(control));
4347 control.sframe = 1;
4348 control.final = 1;
4349 control.reqseq = chan->buffer_seq;
4350 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3982 4351
3983 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4352 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3984 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 4353 control.super = L2CAP_SUPER_RNR;
3985 l2cap_send_sframe(chan, control); 4354 l2cap_send_sframe(chan, &control);
3986 set_bit(CONN_RNR_SENT, &chan->conn_state);
3987 } 4355 }
3988 4356
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 4357 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3990 l2cap_retransmit_frames(chan); 4358 chan->unacked_frames > 0)
4359 __set_retrans_timer(chan);
3991 4360
4361 /* Send pending iframes */
3992 l2cap_ertm_send(chan); 4362 l2cap_ertm_send(chan);
3993 4363
3994 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 4364 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3995 chan->frames_sent == 0) { 4365 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
3996 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 4366 /* F-bit wasn't sent in an s-frame or i-frame yet, so
3997 l2cap_send_sframe(chan, control); 4367 * send it now.
3998 } 4368 */
3999} 4369 control.super = L2CAP_SUPER_RR;
4000 4370 l2cap_send_sframe(chan, &control);
4001static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4002{
4003 struct sk_buff *next_skb;
4004 int tx_seq_offset, next_tx_seq_offset;
4005
4006 bt_cb(skb)->control.txseq = tx_seq;
4007 bt_cb(skb)->control.sar = sar;
4008
4009 next_skb = skb_peek(&chan->srej_q);
4010
4011 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4012
4013 while (next_skb) {
4014 if (bt_cb(next_skb)->control.txseq == tx_seq)
4015 return -EINVAL;
4016
4017 next_tx_seq_offset = __seq_offset(chan,
4018 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4019
4020 if (next_tx_seq_offset > tx_seq_offset) {
4021 __skb_queue_before(&chan->srej_q, next_skb, skb);
4022 return 0;
4023 }
4024
4025 if (skb_queue_is_last(&chan->srej_q, next_skb))
4026 next_skb = NULL;
4027 else
4028 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4029 } 4371 }
4030
4031 __skb_queue_tail(&chan->srej_q, skb);
4032
4033 return 0;
4034} 4372}
4035 4373
4036static void append_skb_frag(struct sk_buff *skb, 4374static void append_skb_frag(struct sk_buff *skb,
@@ -4052,16 +4390,17 @@ static void append_skb_frag(struct sk_buff *skb,
4052 skb->truesize += new_frag->truesize; 4390 skb->truesize += new_frag->truesize;
4053} 4391}
4054 4392
4055static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 4393static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4394 struct l2cap_ctrl *control)
4056{ 4395{
4057 int err = -EINVAL; 4396 int err = -EINVAL;
4058 4397
4059 switch (__get_ctrl_sar(chan, control)) { 4398 switch (control->sar) {
4060 case L2CAP_SAR_UNSEGMENTED: 4399 case L2CAP_SAR_UNSEGMENTED:
4061 if (chan->sdu) 4400 if (chan->sdu)
4062 break; 4401 break;
4063 4402
4064 err = chan->ops->recv(chan->data, skb); 4403 err = chan->ops->recv(chan, skb);
4065 break; 4404 break;
4066 4405
4067 case L2CAP_SAR_START: 4406 case L2CAP_SAR_START:
@@ -4111,7 +4450,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4111 if (chan->sdu->len != chan->sdu_len) 4450 if (chan->sdu->len != chan->sdu_len)
4112 break; 4451 break;
4113 4452
4114 err = chan->ops->recv(chan->data, chan->sdu); 4453 err = chan->ops->recv(chan, chan->sdu);
4115 4454
4116 if (!err) { 4455 if (!err) {
4117 /* Reassembly complete */ 4456 /* Reassembly complete */
@@ -4133,448 +4472,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4133 return err; 4472 return err;
4134} 4473}
4135 4474
4136static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4475void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4137{ 4476{
4138 BT_DBG("chan %p, Enter local busy", chan); 4477 u8 event;
4139 4478
4140 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4479 if (chan->mode != L2CAP_MODE_ERTM)
4141 l2cap_seq_list_clear(&chan->srej_list); 4480 return;
4142 4481
4143 __set_ack_timer(chan); 4482 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4483 l2cap_tx(chan, NULL, NULL, event);
4144} 4484}
4145 4485
4146static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4486static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4147{ 4487{
4148 u32 control; 4488 int err = 0;
4149 4489 /* Pass sequential frames to l2cap_reassemble_sdu()
4150 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4490 * until a gap is encountered.
4151 goto done; 4491 */
4152 4492
4153 control = __set_reqseq(chan, chan->buffer_seq); 4493 BT_DBG("chan %p", chan);
4154 control |= __set_ctrl_poll(chan);
4155 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4156 l2cap_send_sframe(chan, control);
4157 chan->retry_count = 1;
4158 4494
4159 __clear_retrans_timer(chan); 4495 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4160 __set_monitor_timer(chan); 4496 struct sk_buff *skb;
4497 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4498 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4161 4499
4162 set_bit(CONN_WAIT_F, &chan->conn_state); 4500 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4163 4501
4164done: 4502 if (!skb)
4165 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4503 break;
4166 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4167 4504
4168 BT_DBG("chan %p, Exit local busy", chan); 4505 skb_unlink(skb, &chan->srej_q);
4169} 4506 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4507 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4508 if (err)
4509 break;
4510 }
4170 4511
4171void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 4512 if (skb_queue_empty(&chan->srej_q)) {
4172{ 4513 chan->rx_state = L2CAP_RX_STATE_RECV;
4173 if (chan->mode == L2CAP_MODE_ERTM) { 4514 l2cap_send_ack(chan);
4174 if (busy)
4175 l2cap_ertm_enter_local_busy(chan);
4176 else
4177 l2cap_ertm_exit_local_busy(chan);
4178 } 4515 }
4516
4517 return err;
4179} 4518}
4180 4519
4181static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 4520static void l2cap_handle_srej(struct l2cap_chan *chan,
4521 struct l2cap_ctrl *control)
4182{ 4522{
4183 struct sk_buff *skb; 4523 struct sk_buff *skb;
4184 u32 control;
4185 4524
4186 while ((skb = skb_peek(&chan->srej_q)) && 4525 BT_DBG("chan %p, control %p", chan, control);
4187 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4188 int err;
4189 4526
4190 if (bt_cb(skb)->control.txseq != tx_seq) 4527 if (control->reqseq == chan->next_tx_seq) {
4191 break; 4528 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4530 return;
4531 }
4192 4532
4193 skb = skb_dequeue(&chan->srej_q); 4533 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4194 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4195 err = l2cap_reassemble_sdu(chan, skb, control);
4196 4534
4197 if (err < 0) { 4535 if (skb == NULL) {
4198 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4536 BT_DBG("Seq %d not available for retransmission",
4199 break; 4537 control->reqseq);
4200 } 4538 return;
4539 }
4201 4540
4202 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 4541 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4203 tx_seq = __next_seq(chan, tx_seq); 4542 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4544 return;
4204 } 4545 }
4205}
4206 4546
4207static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4547 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4208{
4209 struct srej_list *l, *tmp;
4210 u32 control;
4211 4548
4212 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 4549 if (control->poll) {
4213 if (l->tx_seq == tx_seq) { 4550 l2cap_pass_to_tx(chan, control);
4214 list_del(&l->list); 4551
4215 kfree(l); 4552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4216 return; 4553 l2cap_retransmit(chan, control);
4554 l2cap_ertm_send(chan);
4555
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4559 }
4560 } else {
4561 l2cap_pass_to_tx_fbit(chan, control);
4562
4563 if (control->final) {
4564 if (chan->srej_save_reqseq != control->reqseq ||
4565 !test_and_clear_bit(CONN_SREJ_ACT,
4566 &chan->conn_state))
4567 l2cap_retransmit(chan, control);
4568 } else {
4569 l2cap_retransmit(chan, control);
4570 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4571 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4572 chan->srej_save_reqseq = control->reqseq;
4573 }
4217 } 4574 }
4218 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4219 control |= __set_reqseq(chan, l->tx_seq);
4220 l2cap_send_sframe(chan, control);
4221 list_del(&l->list);
4222 list_add_tail(&l->list, &chan->srej_l);
4223 } 4575 }
4224} 4576}
4225 4577
4226static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4578static void l2cap_handle_rej(struct l2cap_chan *chan,
4579 struct l2cap_ctrl *control)
4227{ 4580{
4228 struct srej_list *new; 4581 struct sk_buff *skb;
4229 u32 control;
4230
4231 while (tx_seq != chan->expected_tx_seq) {
4232 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4233 control |= __set_reqseq(chan, chan->expected_tx_seq);
4234 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4235 l2cap_send_sframe(chan, control);
4236 4582
4237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4583 BT_DBG("chan %p, control %p", chan, control);
4238 if (!new)
4239 return -ENOMEM;
4240 4584
4241 new->tx_seq = chan->expected_tx_seq; 4585 if (control->reqseq == chan->next_tx_seq) {
4586 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4587 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4588 return;
4589 }
4242 4590
4243 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4591 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4244 4592
4245 list_add_tail(&new->list, &chan->srej_l); 4593 if (chan->max_tx && skb &&
4594 bt_cb(skb)->control.retries >= chan->max_tx) {
4595 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4597 return;
4246 } 4598 }
4247 4599
4248 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4601
4602 l2cap_pass_to_tx(chan, control);
4249 4603
4250 return 0; 4604 if (control->final) {
4605 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4606 l2cap_retransmit_all(chan, control);
4607 } else {
4608 l2cap_retransmit_all(chan, control);
4609 l2cap_ertm_send(chan);
4610 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4611 set_bit(CONN_REJ_ACT, &chan->conn_state);
4612 }
4251} 4613}
4252 4614
4253static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4615static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4254{ 4616{
4255 u16 tx_seq = __get_txseq(chan, rx_control); 4617 BT_DBG("chan %p, txseq %d", chan, txseq);
4256 u16 req_seq = __get_reqseq(chan, rx_control);
4257 u8 sar = __get_ctrl_sar(chan, rx_control);
4258 int tx_seq_offset, expected_tx_seq_offset;
4259 int num_to_ack = (chan->tx_win/6) + 1;
4260 int err = 0;
4261 4618
4262 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 4619 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4263 tx_seq, rx_control); 4620 chan->expected_tx_seq);
4264 4621
4265 if (__is_ctrl_final(chan, rx_control) && 4622 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4266 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4623 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4267 __clear_monitor_timer(chan); 4624 chan->tx_win) {
4268 if (chan->unacked_frames > 0) 4625 /* See notes below regarding "double poll" and
4269 __set_retrans_timer(chan); 4626 * invalid packets.
4270 clear_bit(CONN_WAIT_F, &chan->conn_state); 4627 */
4271 } 4628 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4629 BT_DBG("Invalid/Ignore - after SREJ");
4630 return L2CAP_TXSEQ_INVALID_IGNORE;
4631 } else {
4632 BT_DBG("Invalid - in window after SREJ sent");
4633 return L2CAP_TXSEQ_INVALID;
4634 }
4635 }
4272 4636
4273 chan->expected_ack_seq = req_seq; 4637 if (chan->srej_list.head == txseq) {
4274 l2cap_drop_acked_frames(chan); 4638 BT_DBG("Expected SREJ");
4639 return L2CAP_TXSEQ_EXPECTED_SREJ;
4640 }
4275 4641
4276 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4642 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4643 BT_DBG("Duplicate SREJ - txseq already stored");
4644 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4645 }
4277 4646
4278 /* invalid tx_seq */ 4647 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4279 if (tx_seq_offset >= chan->tx_win) { 4648 BT_DBG("Unexpected SREJ - not requested");
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4649 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4281 goto drop; 4650 }
4282 } 4651 }
4283 4652
4284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4653 if (chan->expected_tx_seq == txseq) {
4285 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4654 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4286 l2cap_send_ack(chan); 4655 chan->tx_win) {
4287 goto drop; 4656 BT_DBG("Invalid - txseq outside tx window");
4657 return L2CAP_TXSEQ_INVALID;
4658 } else {
4659 BT_DBG("Expected");
4660 return L2CAP_TXSEQ_EXPECTED;
4661 }
4288 } 4662 }
4289 4663
4290 if (tx_seq == chan->expected_tx_seq) 4664 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4291 goto expected; 4665 __seq_offset(chan, chan->expected_tx_seq,
4666 chan->last_acked_seq)){
4667 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4668 return L2CAP_TXSEQ_DUPLICATE;
4669 }
4670
4671 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4672 /* A source of invalid packets is a "double poll" condition,
4673 * where delays cause us to send multiple poll packets. If
4674 * the remote stack receives and processes both polls,
4675 * sequence numbers can wrap around in such a way that a
4676 * resent frame has a sequence number that looks like new data
4677 * with a sequence gap. This would trigger an erroneous SREJ
4678 * request.
4679 *
4680 * Fortunately, this is impossible with a tx window that's
4681 * less than half of the maximum sequence number, which allows
4682 * invalid frames to be safely ignored.
4683 *
4684 * With tx window sizes greater than half of the tx window
4685 * maximum, the frame is invalid and cannot be ignored. This
4686 * causes a disconnect.
4687 */
4292 4688
4293 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4689 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4294 struct srej_list *first; 4690 BT_DBG("Invalid/Ignore - txseq outside tx window");
4691 return L2CAP_TXSEQ_INVALID_IGNORE;
4692 } else {
4693 BT_DBG("Invalid - txseq outside tx window");
4694 return L2CAP_TXSEQ_INVALID;
4695 }
4696 } else {
4697 BT_DBG("Unexpected - txseq indicates missing frames");
4698 return L2CAP_TXSEQ_UNEXPECTED;
4699 }
4700}
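The window check described in the comment above reduces to modular offset arithmetic. A standalone sketch of the classification rule (illustrative only, not part of this patch; simplified to ignore duplicates and pending SREJs):

/* Illustrative sketch of the txseq window classification, not kernel code. */
#include <stdio.h>

enum txseq_class { TXSEQ_EXPECTED, TXSEQ_UNEXPECTED, TXSEQ_INVALID_IGNORE, TXSEQ_INVALID };

static unsigned int seq_offset(unsigned int a, unsigned int b, unsigned int seq_max)
{
        /* distance from b forward to a, modulo the sequence space */
        return (a >= b) ? a - b : (seq_max + 1) - b + a;
}

static enum txseq_class classify_txseq(unsigned int txseq, unsigned int expected,
                                       unsigned int last_acked, unsigned int tx_win,
                                       unsigned int seq_max)
{
        if (seq_offset(txseq, last_acked, seq_max) >= tx_win) {
                /* outside the window: ignorable only if the window is at most
                 * half of the sequence space, as the comment above explains */
                if (tx_win <= (seq_max + 1) / 2)
                        return TXSEQ_INVALID_IGNORE;
                return TXSEQ_INVALID;
        }
        return (txseq == expected) ? TXSEQ_EXPECTED : TXSEQ_UNEXPECTED;
}

int main(void)
{
        /* assumed 6-bit sequence space (0..63) with a tx window of 32 */
        printf("%d\n", classify_txseq(5, 5, 3, 32, 63));        /* 0: expected */
        printf("%d\n", classify_txseq(40, 5, 3, 32, 63));       /* 2: outside window, ignorable */
        return 0;
}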
4295 4701
4296 first = list_first_entry(&chan->srej_l, 4702static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4297 struct srej_list, list); 4703 struct l2cap_ctrl *control,
4298 if (tx_seq == first->tx_seq) { 4704 struct sk_buff *skb, u8 event)
4299 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4705{
4300 l2cap_check_srej_gap(chan, tx_seq); 4706 int err = 0;
4707 bool skb_in_use = 0;
4301 4708
4302 list_del(&first->list); 4709 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4303 kfree(first); 4710 event);
4304 4711
4305 if (list_empty(&chan->srej_l)) { 4712 switch (event) {
4306 chan->buffer_seq = chan->buffer_seq_srej; 4713 case L2CAP_EV_RECV_IFRAME:
4307 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 4714 switch (l2cap_classify_txseq(chan, control->txseq)) {
4308 l2cap_send_ack(chan); 4715 case L2CAP_TXSEQ_EXPECTED:
4309 BT_DBG("chan %p, Exit SREJ_SENT", chan); 4716 l2cap_pass_to_tx(chan, control);
4717
4718 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4719 BT_DBG("Busy, discarding expected seq %d",
4720 control->txseq);
4721 break;
4310 } 4722 }
4311 } else {
4312 struct srej_list *l;
4313 4723
4314 /* duplicated tx_seq */ 4724 chan->expected_tx_seq = __next_seq(chan,
4315 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 4725 control->txseq);
4316 goto drop; 4726
4727 chan->buffer_seq = chan->expected_tx_seq;
4728 skb_in_use = 1;
4729
4730 err = l2cap_reassemble_sdu(chan, skb, control);
4731 if (err)
4732 break;
4317 4733
4318 list_for_each_entry(l, &chan->srej_l, list) { 4734 if (control->final) {
4319 if (l->tx_seq == tx_seq) { 4735 if (!test_and_clear_bit(CONN_REJ_ACT,
4320 l2cap_resend_srejframe(chan, tx_seq); 4736 &chan->conn_state)) {
4321 return 0; 4737 control->final = 0;
4738 l2cap_retransmit_all(chan, control);
4739 l2cap_ertm_send(chan);
4322 } 4740 }
4323 } 4741 }
4324 4742
4325 err = l2cap_send_srejframe(chan, tx_seq); 4743 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4326 if (err < 0) { 4744 l2cap_send_ack(chan);
4327 l2cap_send_disconn_req(chan->conn, chan, -err); 4745 break;
4328 return err; 4746 case L2CAP_TXSEQ_UNEXPECTED:
4747 l2cap_pass_to_tx(chan, control);
4748
4749 /* Can't issue SREJ frames in the local busy state.
4750 * Drop this frame, it will be seen as missing
4751 * when local busy is exited.
4752 */
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding unexpected seq %d",
4755 control->txseq);
4756 break;
4329 } 4757 }
4330 }
4331 } else {
4332 expected_tx_seq_offset = __seq_offset(chan,
4333 chan->expected_tx_seq, chan->buffer_seq);
4334 4758
4335 /* duplicated tx_seq */ 4759 /* There was a gap in the sequence, so an SREJ
4336 if (tx_seq_offset < expected_tx_seq_offset) 4760 * must be sent for each missing frame. The
4337 goto drop; 4761 * current frame is stored for later use.
4338 4762 */
4339 set_bit(CONN_SREJ_SENT, &chan->conn_state); 4763 skb_queue_tail(&chan->srej_q, skb);
4764 skb_in_use = 1;
4765 BT_DBG("Queued %p (queue len %d)", skb,
4766 skb_queue_len(&chan->srej_q));
4340 4767
4341 BT_DBG("chan %p, Enter SREJ", chan); 4768 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4769 l2cap_seq_list_clear(&chan->srej_list);
4770 l2cap_send_srej(chan, control->txseq);
4342 4771
4343 INIT_LIST_HEAD(&chan->srej_l); 4772 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4344 chan->buffer_seq_srej = chan->buffer_seq; 4773 break;
4774 case L2CAP_TXSEQ_DUPLICATE:
4775 l2cap_pass_to_tx(chan, control);
4776 break;
4777 case L2CAP_TXSEQ_INVALID_IGNORE:
4778 break;
4779 case L2CAP_TXSEQ_INVALID:
4780 default:
4781 l2cap_send_disconn_req(chan->conn, chan,
4782 ECONNRESET);
4783 break;
4784 }
4785 break;
4786 case L2CAP_EV_RECV_RR:
4787 l2cap_pass_to_tx(chan, control);
4788 if (control->final) {
4789 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4345 4790
4346 __skb_queue_head_init(&chan->srej_q); 4791 if (!test_and_clear_bit(CONN_REJ_ACT,
4347 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4792 &chan->conn_state)) {
4793 control->final = 0;
4794 l2cap_retransmit_all(chan, control);
4795 }
4348 4796
4349 /* Set P-bit only if there are some I-frames to ack. */ 4797 l2cap_ertm_send(chan);
4350 if (__clear_ack_timer(chan)) 4798 } else if (control->poll) {
4351 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4799 l2cap_send_i_or_rr_or_rnr(chan);
4800 } else {
4801 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4802 &chan->conn_state) &&
4803 chan->unacked_frames)
4804 __set_retrans_timer(chan);
4352 4805
4353 err = l2cap_send_srejframe(chan, tx_seq); 4806 l2cap_ertm_send(chan);
4354 if (err < 0) {
4355 l2cap_send_disconn_req(chan->conn, chan, -err);
4356 return err;
4357 } 4807 }
4358 } 4808 break;
4359 return 0; 4809 case L2CAP_EV_RECV_RNR:
4360 4810 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4361expected: 4811 l2cap_pass_to_tx(chan, control);
4362 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4812 if (control && control->poll) {
4363 4813 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4364 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4814 l2cap_send_rr_or_rnr(chan, 0);
4365 bt_cb(skb)->control.txseq = tx_seq; 4815 }
4366 bt_cb(skb)->control.sar = sar; 4816 __clear_retrans_timer(chan);
4367 __skb_queue_tail(&chan->srej_q, skb); 4817 l2cap_seq_list_clear(&chan->retrans_list);
4368 return 0; 4818 break;
4819 case L2CAP_EV_RECV_REJ:
4820 l2cap_handle_rej(chan, control);
4821 break;
4822 case L2CAP_EV_RECV_SREJ:
4823 l2cap_handle_srej(chan, control);
4824 break;
4825 default:
4826 break;
4369 } 4827 }
4370 4828
4371 err = l2cap_reassemble_sdu(chan, skb, rx_control); 4829 if (skb && !skb_in_use) {
4372 chan->buffer_seq = __next_seq(chan, chan->buffer_seq); 4830 BT_DBG("Freeing %p", skb);
4373 4831 kfree_skb(skb);
4374 if (err < 0) {
4375 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4376 return err;
4377 } 4832 }
4378 4833
4379 if (__is_ctrl_final(chan, rx_control)) { 4834 return err;
4380 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4835}
4381 l2cap_retransmit_frames(chan);
4382 }
4383 4836
4837static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4838 struct l2cap_ctrl *control,
4839 struct sk_buff *skb, u8 event)
4840{
4841 int err = 0;
4842 u16 txseq = control->txseq;
4843 bool skb_in_use = 0;
4844
4845 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4846 event);
4847
4848 switch (event) {
4849 case L2CAP_EV_RECV_IFRAME:
4850 switch (l2cap_classify_txseq(chan, txseq)) {
4851 case L2CAP_TXSEQ_EXPECTED:
4852 /* Keep frame for reassembly later */
4853 l2cap_pass_to_tx(chan, control);
4854 skb_queue_tail(&chan->srej_q, skb);
4855 skb_in_use = 1;
4856 BT_DBG("Queued %p (queue len %d)", skb,
4857 skb_queue_len(&chan->srej_q));
4858
4859 chan->expected_tx_seq = __next_seq(chan, txseq);
4860 break;
4861 case L2CAP_TXSEQ_EXPECTED_SREJ:
4862 l2cap_seq_list_pop(&chan->srej_list);
4384 4863
4385 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 4864 l2cap_pass_to_tx(chan, control);
4386 if (chan->num_acked == num_to_ack - 1) 4865 skb_queue_tail(&chan->srej_q, skb);
4387 l2cap_send_ack(chan); 4866 skb_in_use = 1;
4388 else 4867 BT_DBG("Queued %p (queue len %d)", skb,
4389 __set_ack_timer(chan); 4868 skb_queue_len(&chan->srej_q));
4390 4869
4391 return 0; 4870 err = l2cap_rx_queued_iframes(chan);
4871 if (err)
4872 break;
4392 4873
4393drop: 4874 break;
4394 kfree_skb(skb); 4875 case L2CAP_TXSEQ_UNEXPECTED:
4395 return 0; 4876 /* Got a frame that can't be reassembled yet.
4396} 4877 * Save it for later, and send SREJs to cover
4878 * the missing frames.
4879 */
4880 skb_queue_tail(&chan->srej_q, skb);
4881 skb_in_use = 1;
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4884
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej(chan, control->txseq);
4887 break;
4888 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4889 /* This frame was requested with an SREJ, but
4890 * some expected retransmitted frames are
4891 * missing. Request retransmission of missing
4892 * SREJ'd frames.
4893 */
4894 skb_queue_tail(&chan->srej_q, skb);
4895 skb_in_use = 1;
4896 BT_DBG("Queued %p (queue len %d)", skb,
4897 skb_queue_len(&chan->srej_q));
4898
4899 l2cap_pass_to_tx(chan, control);
4900 l2cap_send_srej_list(chan, control->txseq);
4901 break;
4902 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4903 /* We've already queued this frame. Drop this copy. */
4904 l2cap_pass_to_tx(chan, control);
4905 break;
4906 case L2CAP_TXSEQ_DUPLICATE:
4907 /* Expecting a later sequence number, so this frame
4908 * was already received. Ignore it completely.
4909 */
4910 break;
4911 case L2CAP_TXSEQ_INVALID_IGNORE:
4912 break;
4913 case L2CAP_TXSEQ_INVALID:
4914 default:
4915 l2cap_send_disconn_req(chan->conn, chan,
4916 ECONNRESET);
4917 break;
4918 }
4919 break;
4920 case L2CAP_EV_RECV_RR:
4921 l2cap_pass_to_tx(chan, control);
4922 if (control->final) {
4923 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4397 4924
4398static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 4925 if (!test_and_clear_bit(CONN_REJ_ACT,
4399{ 4926 &chan->conn_state)) {
4400 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 4927 control->final = 0;
4401 __get_reqseq(chan, rx_control), rx_control); 4928 l2cap_retransmit_all(chan, control);
4929 }
4402 4930
4403 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 4931 l2cap_ertm_send(chan);
4404 l2cap_drop_acked_frames(chan); 4932 } else if (control->poll) {
4933 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4934 &chan->conn_state) &&
4935 chan->unacked_frames) {
4936 __set_retrans_timer(chan);
4937 }
4405 4938
4406 if (__is_ctrl_poll(chan, rx_control)) { 4939 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4407 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4940 l2cap_send_srej_tail(chan);
4408 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4941 } else {
4409 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4942 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4410 (chan->unacked_frames > 0)) 4943 &chan->conn_state) &&
4944 chan->unacked_frames)
4411 __set_retrans_timer(chan); 4945 __set_retrans_timer(chan);
4412 4946
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4947 l2cap_send_ack(chan);
4414 l2cap_send_srejtail(chan); 4948 }
4949 break;
4950 case L2CAP_EV_RECV_RNR:
4951 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4952 l2cap_pass_to_tx(chan, control);
4953 if (control->poll) {
4954 l2cap_send_srej_tail(chan);
4415 } else { 4955 } else {
4416 l2cap_send_i_or_rr_or_rnr(chan); 4956 struct l2cap_ctrl rr_control;
4957 memset(&rr_control, 0, sizeof(rr_control));
4958 rr_control.sframe = 1;
4959 rr_control.super = L2CAP_SUPER_RR;
4960 rr_control.reqseq = chan->buffer_seq;
4961 l2cap_send_sframe(chan, &rr_control);
4417 } 4962 }
4418 4963
4419 } else if (__is_ctrl_final(chan, rx_control)) { 4964 break;
4420 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4965 case L2CAP_EV_RECV_REJ:
4421 4966 l2cap_handle_rej(chan, control);
4422 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4967 break;
4423 l2cap_retransmit_frames(chan); 4968 case L2CAP_EV_RECV_SREJ:
4424 4969 l2cap_handle_srej(chan, control);
4425 } else { 4970 break;
4426 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4971 }
4427 (chan->unacked_frames > 0))
4428 __set_retrans_timer(chan);
4429 4972
4430 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4973 if (skb && !skb_in_use) {
4431 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4974 BT_DBG("Freeing %p", skb);
4432 l2cap_send_ack(chan); 4975 kfree_skb(skb);
4433 else
4434 l2cap_ertm_send(chan);
4435 } 4976 }
4977
4978 return err;
4436} 4979}
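The SREJ_SENT state above parks out-of-order frames in srej_q and releases them in sequence once the requested frame arrives. A small illustrative sketch of that hole-filling idea, independent of the kernel structures (not part of this patch):

/* Illustrative reorder-buffer sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define SEQ_SPACE 64

struct reorder_buf {
        bool present[SEQ_SPACE];        /* frames parked while a hole exists */
        unsigned int expected;          /* next sequence number to deliver */
};

static void rx_frame(struct reorder_buf *rb, unsigned int txseq)
{
        rb->present[txseq % SEQ_SPACE] = true;

        /* deliver the longest in-order run starting at the expected seq */
        while (rb->present[rb->expected]) {
                printf("deliver seq %u\n", rb->expected);
                rb->present[rb->expected] = false;
                rb->expected = (rb->expected + 1) % SEQ_SPACE;
        }
}

int main(void)
{
        struct reorder_buf rb = { .expected = 0 };

        rx_frame(&rb, 1);       /* gap: seq 0 missing, frame parked */
        rx_frame(&rb, 2);       /* still parked */
        rx_frame(&rb, 0);       /* hole filled: delivers 0, 1, 2 */
        return 0;
}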
4437 4980
4438static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4981static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4439{ 4982{
4440 u16 tx_seq = __get_reqseq(chan, rx_control); 4983 /* Make sure reqseq is for a packet that has been sent but not acked */
4441 4984 u16 unacked;
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4443
4444 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4445
4446 chan->expected_ack_seq = tx_seq;
4447 l2cap_drop_acked_frames(chan);
4448 4985
4449 if (__is_ctrl_final(chan, rx_control)) { 4986 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4450 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4987 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4451 l2cap_retransmit_frames(chan);
4452 } else {
4453 l2cap_retransmit_frames(chan);
4454
4455 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4456 set_bit(CONN_REJ_ACT, &chan->conn_state);
4457 }
4458} 4988}
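__valid_reqseq() above accepts an acknowledgement only if it falls within the currently unacked span, computed modulo the sequence space. A compact userspace sketch of the same check (illustrative only; a 6-bit sequence space is assumed):

/* Illustrative sketch of the reqseq validity check, not kernel code. */
#include <stdbool.h>
#include <assert.h>

#define SEQ_MOD 64u     /* assumed 6-bit sequence space */

static unsigned int seq_offset(unsigned int a, unsigned int b)
{
        return (a >= b) ? a - b : SEQ_MOD - b + a;
}

static bool valid_reqseq(unsigned int reqseq, unsigned int next_tx_seq,
                         unsigned int expected_ack_seq)
{
        unsigned int unacked = seq_offset(next_tx_seq, expected_ack_seq);

        return seq_offset(next_tx_seq, reqseq) <= unacked;
}

int main(void)
{
        /* sent 60..63,0,1 with nothing acked: next_tx_seq=2, expected_ack_seq=60 */
        assert(valid_reqseq(63, 2, 60));        /* inside the unacked window */
        assert(valid_reqseq(2, 2, 60));         /* acking everything sent is fine */
        assert(!valid_reqseq(10, 2, 60));       /* never sent: reject */
        return 0;
}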
4459static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4460{
4461 u16 tx_seq = __get_reqseq(chan, rx_control);
4462 4989
4463 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 4990static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4464 4991 struct sk_buff *skb, u8 event)
4465 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4992{
4466 4993 int err = 0;
4467 if (__is_ctrl_poll(chan, rx_control)) {
4468 chan->expected_ack_seq = tx_seq;
4469 l2cap_drop_acked_frames(chan);
4470
4471 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4472 l2cap_retransmit_one_frame(chan, tx_seq);
4473 4994
4474 l2cap_ertm_send(chan); 4995 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4996 control, skb, event, chan->rx_state);
4475 4997
4476 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4998 if (__valid_reqseq(chan, control->reqseq)) {
4477 chan->srej_save_reqseq = tx_seq; 4999 switch (chan->rx_state) {
4478 set_bit(CONN_SREJ_ACT, &chan->conn_state); 5000 case L2CAP_RX_STATE_RECV:
5001 err = l2cap_rx_state_recv(chan, control, skb, event);
5002 break;
5003 case L2CAP_RX_STATE_SREJ_SENT:
5004 err = l2cap_rx_state_srej_sent(chan, control, skb,
5005 event);
5006 break;
5007 default:
5008 /* shut it down */
5009 break;
4479 } 5010 }
4480 } else if (__is_ctrl_final(chan, rx_control)) {
4481 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4482 chan->srej_save_reqseq == tx_seq)
4483 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4484 else
4485 l2cap_retransmit_one_frame(chan, tx_seq);
4486 } else { 5011 } else {
4487 l2cap_retransmit_one_frame(chan, tx_seq); 5012 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4488 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 5013 control->reqseq, chan->next_tx_seq,
4489 chan->srej_save_reqseq = tx_seq; 5014 chan->expected_ack_seq);
4490 set_bit(CONN_SREJ_ACT, &chan->conn_state); 5015 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4491 }
4492 } 5016 }
5017
5018 return err;
4493} 5019}
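l2cap_rx() above is a thin dispatcher: after the reqseq sanity check it routes the same set of events to a per-state handler, so SREJ recovery only changes which handler runs, not how frames are parsed. A minimal sketch of that table-driven dispatch (illustrative only, handlers reduced to stubs):

/* Illustrative sketch of per-state event dispatch, not kernel code. */
#include <stdio.h>

enum rx_state { RX_STATE_RECV, RX_STATE_SREJ_SENT, RX_STATE_MAX };
enum rx_event { EV_RECV_IFRAME, EV_RECV_RR, EV_RECV_REJ, EV_RECV_SREJ, EV_RECV_RNR };

typedef int (*rx_handler)(enum rx_event event);

static int rx_state_recv(enum rx_event event)
{
        printf("RECV state handling event %d\n", event);
        return 0;
}

static int rx_state_srej_sent(enum rx_event event)
{
        printf("SREJ_SENT state handling event %d\n", event);
        return 0;
}

static const rx_handler rx_handlers[RX_STATE_MAX] = {
        [RX_STATE_RECV]      = rx_state_recv,
        [RX_STATE_SREJ_SENT] = rx_state_srej_sent,
};

int main(void)
{
        enum rx_state state = RX_STATE_SREJ_SENT;

        /* the real dispatcher validates reqseq before this point */
        return rx_handlers[state](EV_RECV_IFRAME);
}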
4494 5020
4495static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 5021static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5022 struct sk_buff *skb)
4496{ 5023{
4497 u16 tx_seq = __get_reqseq(chan, rx_control); 5024 int err = 0;
4498 5025
4499 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 5026 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5027 chan->rx_state);
4500 5028
4501 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5029 if (l2cap_classify_txseq(chan, control->txseq) ==
4502 chan->expected_ack_seq = tx_seq; 5030 L2CAP_TXSEQ_EXPECTED) {
4503 l2cap_drop_acked_frames(chan); 5031 l2cap_pass_to_tx(chan, control);
4504 5032
4505 if (__is_ctrl_poll(chan, rx_control)) 5033 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4506 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5034 __next_seq(chan, chan->buffer_seq));
4507 5035
4508 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 5036 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4509 __clear_retrans_timer(chan);
4510 if (__is_ctrl_poll(chan, rx_control))
4511 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4512 return;
4513 }
4514 5037
4515 if (__is_ctrl_poll(chan, rx_control)) { 5038 l2cap_reassemble_sdu(chan, skb, control);
4516 l2cap_send_srejtail(chan);
4517 } else { 5039 } else {
4518 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 5040 if (chan->sdu) {
4519 l2cap_send_sframe(chan, rx_control); 5041 kfree_skb(chan->sdu);
4520 } 5042 chan->sdu = NULL;
4521} 5043 }
4522 5044 chan->sdu_last_frag = NULL;
4523static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 5045 chan->sdu_len = 0;
4524{
4525 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4526 5046
4527 if (__is_ctrl_final(chan, rx_control) && 5047 if (skb) {
4528 test_bit(CONN_WAIT_F, &chan->conn_state)) { 5048 BT_DBG("Freeing %p", skb);
4529 __clear_monitor_timer(chan); 5049 kfree_skb(skb);
4530 if (chan->unacked_frames > 0) 5050 }
4531 __set_retrans_timer(chan);
4532 clear_bit(CONN_WAIT_F, &chan->conn_state);
4533 } 5051 }
4534 5052
4535 switch (__get_ctrl_super(chan, rx_control)) { 5053 chan->last_acked_seq = control->txseq;
4536 case L2CAP_SUPER_RR: 5054 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4537 l2cap_data_channel_rrframe(chan, rx_control);
4538 break;
4539
4540 case L2CAP_SUPER_REJ:
4541 l2cap_data_channel_rejframe(chan, rx_control);
4542 break;
4543
4544 case L2CAP_SUPER_SREJ:
4545 l2cap_data_channel_srejframe(chan, rx_control);
4546 break;
4547
4548 case L2CAP_SUPER_RNR:
4549 l2cap_data_channel_rnrframe(chan, rx_control);
4550 break;
4551 }
4552 5055
4553 kfree_skb(skb); 5056 return err;
4554 return 0;
4555} 5057}
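Streaming mode above has no retransmission, so a sequence gap can only be handled by discarding the partially reassembled SDU and resynchronising on the frame just received. A short illustrative sketch of that rule (not part of this patch):

/* Illustrative sketch of streaming-mode resynchronisation, not kernel code. */
#include <stdio.h>

#define SEQ_MOD 64u

struct stream_rx {
        unsigned int expected_tx_seq;
        unsigned int sdu_bytes;         /* bytes of a partially reassembled SDU */
};

static void stream_rx_frame(struct stream_rx *rx, unsigned int txseq, unsigned int len)
{
        if (txseq != rx->expected_tx_seq && rx->sdu_bytes) {
                printf("gap before seq %u: dropping %u buffered bytes\n",
                       txseq, rx->sdu_bytes);
                rx->sdu_bytes = 0;      /* partial SDU can never be completed */
        }

        rx->sdu_bytes += len;           /* stand-in for SAR reassembly bookkeeping */
        rx->expected_tx_seq = (txseq + 1) % SEQ_MOD;
}

int main(void)
{
        struct stream_rx rx = { .expected_tx_seq = 0 };

        stream_rx_frame(&rx, 0, 100);
        stream_rx_frame(&rx, 2, 100);   /* seq 1 was lost in the air */
        return 0;
}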
4556 5058
4557static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 5059static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4558{ 5060{
4559 u32 control; 5061 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4560 u16 req_seq; 5062 u16 len;
4561 int len, next_tx_seq_offset, req_seq_offset; 5063 u8 event;
4562 5064
4563 __unpack_control(chan, skb); 5065 __unpack_control(chan, skb);
4564 5066
4565 control = __get_control(chan, skb->data);
4566 skb_pull(skb, __ctrl_size(chan));
4567 len = skb->len; 5067 len = skb->len;
4568 5068
4569 /* 5069 /*
4570 * We can just drop the corrupted I-frame here. 5070 * We can just drop the corrupted I-frame here.
4571 * Receiver will miss it and start proper recovery 5071 * Receiver will miss it and start proper recovery
4572 * procedures and ask retransmission. 5072 * procedures and ask for retransmission.
4573 */ 5073 */
4574 if (l2cap_check_fcs(chan, skb)) 5074 if (l2cap_check_fcs(chan, skb))
4575 goto drop; 5075 goto drop;
4576 5076
4577 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 5077 if (!control->sframe && control->sar == L2CAP_SAR_START)
4578 len -= L2CAP_SDULEN_SIZE; 5078 len -= L2CAP_SDULEN_SIZE;
4579 5079
4580 if (chan->fcs == L2CAP_FCS_CRC16) 5080 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4585,34 +5085,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4585 goto drop; 5085 goto drop;
4586 } 5086 }
4587 5087
4588 req_seq = __get_reqseq(chan, control); 5088 if (!control->sframe) {
4589 5089 int err;
4590 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4591 5090
4592 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, 5091 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4593 chan->expected_ack_seq); 5092 control->sar, control->reqseq, control->final,
5093 control->txseq);
4594 5094
4595 /* check for invalid req-seq */ 5095 /* Validate F-bit - F=0 always valid, F=1 only
4596 if (req_seq_offset > next_tx_seq_offset) { 5096 * valid in TX WAIT_F
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5097 */
4598 goto drop; 5098 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4599 }
4600
4601 if (!__is_sframe(chan, control)) {
4602 if (len < 0) {
4603 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4604 goto drop; 5099 goto drop;
5100
5101 if (chan->mode != L2CAP_MODE_STREAMING) {
5102 event = L2CAP_EV_RECV_IFRAME;
5103 err = l2cap_rx(chan, control, skb, event);
5104 } else {
5105 err = l2cap_stream_rx(chan, control, skb);
4605 } 5106 }
4606 5107
4607 l2cap_data_channel_iframe(chan, control, skb); 5108 if (err)
5109 l2cap_send_disconn_req(chan->conn, chan,
5110 ECONNRESET);
4608 } else { 5111 } else {
5112 const u8 rx_func_to_event[4] = {
5113 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5114 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5115 };
5116
5117 /* Only I-frames are expected in streaming mode */
5118 if (chan->mode == L2CAP_MODE_STREAMING)
5119 goto drop;
5120
5121 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5122 control->reqseq, control->final, control->poll,
5123 control->super);
5124
4609 if (len != 0) { 5125 if (len != 0) {
4610 BT_ERR("%d", len); 5126 BT_ERR("%d", len);
4611 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5127 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4612 goto drop; 5128 goto drop;
4613 } 5129 }
4614 5130
4615 l2cap_data_channel_sframe(chan, control, skb); 5131 /* Validate F and P bits */
5132 if (control->final && (control->poll ||
5133 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5134 goto drop;
5135
5136 event = rx_func_to_event[control->super];
5137 if (l2cap_rx(chan, control, skb, event))
5138 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4616 } 5139 }
4617 5140
4618 return 0; 5141 return 0;
@@ -4622,19 +5145,27 @@ drop:
4622 return 0; 5145 return 0;
4623} 5146}
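For S-frames, the two-bit super field above indexes directly into rx_func_to_event, and the F/P bits are sanity-checked before dispatch: F and P never appear together, and F=1 is only legal while the transmitter waits for a poll response. A standalone sketch of both steps (illustrative only, not part of this patch):

/* Illustrative sketch of S-frame event mapping and F/P validation, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

enum { SUPER_RR, SUPER_REJ, SUPER_RNR, SUPER_SREJ };
enum { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ };

static const int super_to_event[4] = {
        [SUPER_RR]   = EV_RECV_RR,
        [SUPER_REJ]  = EV_RECV_REJ,
        [SUPER_RNR]  = EV_RECV_RNR,
        [SUPER_SREJ] = EV_RECV_SREJ,
};

static bool sframe_bits_valid(bool final, bool poll, bool tx_waiting_for_f)
{
        /* F=1 is invalid together with P=1 or outside the WAIT_F tx state */
        if (final && (poll || !tx_waiting_for_f))
                return false;
        return true;
}

int main(void)
{
        unsigned int super = SUPER_SREJ;

        if (sframe_bits_valid(false, true, false))
                printf("dispatching event %d\n", super_to_event[super]);
        return 0;
}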
4624 5147
4625static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 5148static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5149 struct sk_buff *skb)
4626{ 5150{
4627 struct l2cap_chan *chan; 5151 struct l2cap_chan *chan;
4628 u32 control;
4629 u16 tx_seq;
4630 int len;
4631 5152
4632 chan = l2cap_get_chan_by_scid(conn, cid); 5153 chan = l2cap_get_chan_by_scid(conn, cid);
4633 if (!chan) { 5154 if (!chan) {
4634 BT_DBG("unknown cid 0x%4.4x", cid); 5155 if (cid == L2CAP_CID_A2MP) {
4635 /* Drop packet and return */ 5156 chan = a2mp_channel_create(conn, skb);
4636 kfree_skb(skb); 5157 if (!chan) {
4637 return 0; 5158 kfree_skb(skb);
5159 return;
5160 }
5161
5162 l2cap_chan_lock(chan);
5163 } else {
5164 BT_DBG("unknown cid 0x%4.4x", cid);
5165 /* Drop packet and return */
5166 kfree_skb(skb);
5167 return;
5168 }
4638 } 5169 }
4639 5170
4640 BT_DBG("chan %p, len %d", chan, skb->len); 5171 BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4652,49 +5183,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4652 if (chan->imtu < skb->len) 5183 if (chan->imtu < skb->len)
4653 goto drop; 5184 goto drop;
4654 5185
4655 if (!chan->ops->recv(chan->data, skb)) 5186 if (!chan->ops->recv(chan, skb))
4656 goto done; 5187 goto done;
4657 break; 5188 break;
4658 5189
4659 case L2CAP_MODE_ERTM: 5190 case L2CAP_MODE_ERTM:
4660 l2cap_ertm_data_rcv(chan, skb);
4661
4662 goto done;
4663
4664 case L2CAP_MODE_STREAMING: 5191 case L2CAP_MODE_STREAMING:
4665 control = __get_control(chan, skb->data); 5192 l2cap_data_rcv(chan, skb);
4666 skb_pull(skb, __ctrl_size(chan));
4667 len = skb->len;
4668
4669 if (l2cap_check_fcs(chan, skb))
4670 goto drop;
4671
4672 if (__is_sar_start(chan, control))
4673 len -= L2CAP_SDULEN_SIZE;
4674
4675 if (chan->fcs == L2CAP_FCS_CRC16)
4676 len -= L2CAP_FCS_SIZE;
4677
4678 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4679 goto drop;
4680
4681 tx_seq = __get_txseq(chan, control);
4682
4683 if (chan->expected_tx_seq != tx_seq) {
4684 /* Frame(s) missing - must discard partial SDU */
4685 kfree_skb(chan->sdu);
4686 chan->sdu = NULL;
4687 chan->sdu_last_frag = NULL;
4688 chan->sdu_len = 0;
4689
4690 /* TODO: Notify userland of missing data */
4691 }
4692
4693 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4694
4695 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4696 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4697
4698 goto done; 5193 goto done;
4699 5194
4700 default: 5195 default:
@@ -4707,11 +5202,10 @@ drop:
4707 5202
4708done: 5203done:
4709 l2cap_chan_unlock(chan); 5204 l2cap_chan_unlock(chan);
4710
4711 return 0;
4712} 5205}
4713 5206
4714static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 5207static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5208 struct sk_buff *skb)
4715{ 5209{
4716 struct l2cap_chan *chan; 5210 struct l2cap_chan *chan;
4717 5211
@@ -4727,17 +5221,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4727 if (chan->imtu < skb->len) 5221 if (chan->imtu < skb->len)
4728 goto drop; 5222 goto drop;
4729 5223
4730 if (!chan->ops->recv(chan->data, skb)) 5224 if (!chan->ops->recv(chan, skb))
4731 return 0; 5225 return;
4732 5226
4733drop: 5227drop:
4734 kfree_skb(skb); 5228 kfree_skb(skb);
4735
4736 return 0;
4737} 5229}
4738 5230
4739static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, 5231static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4740 struct sk_buff *skb) 5232 struct sk_buff *skb)
4741{ 5233{
4742 struct l2cap_chan *chan; 5234 struct l2cap_chan *chan;
4743 5235
@@ -4753,13 +5245,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4753 if (chan->imtu < skb->len) 5245 if (chan->imtu < skb->len)
4754 goto drop; 5246 goto drop;
4755 5247
4756 if (!chan->ops->recv(chan->data, skb)) 5248 if (!chan->ops->recv(chan, skb))
4757 return 0; 5249 return;
4758 5250
4759drop: 5251drop:
4760 kfree_skb(skb); 5252 kfree_skb(skb);
4761
4762 return 0;
4763} 5253}
4764 5254
4765static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) 5255static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4787,7 +5277,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4787 5277
4788 case L2CAP_CID_CONN_LESS: 5278 case L2CAP_CID_CONN_LESS:
4789 psm = get_unaligned((__le16 *) skb->data); 5279 psm = get_unaligned((__le16 *) skb->data);
4790 skb_pull(skb, 2); 5280 skb_pull(skb, L2CAP_PSMLEN_SIZE);
4791 l2cap_conless_channel(conn, psm, skb); 5281 l2cap_conless_channel(conn, psm, skb);
4792 break; 5282 break;
4793 5283
@@ -4898,7 +5388,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4898 if (!conn) 5388 if (!conn)
4899 return 0; 5389 return 0;
4900 5390
4901 BT_DBG("conn %p", conn); 5391 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
4902 5392
4903 if (hcon->type == LE_LINK) { 5393 if (hcon->type == LE_LINK) {
4904 if (!status && encrypt) 5394 if (!status && encrypt)
@@ -4911,7 +5401,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4911 list_for_each_entry(chan, &conn->chan_l, list) { 5401 list_for_each_entry(chan, &conn->chan_l, list) {
4912 l2cap_chan_lock(chan); 5402 l2cap_chan_lock(chan);
4913 5403
4914 BT_DBG("chan->scid %d", chan->scid); 5404 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5405 state_to_string(chan->state));
4915 5406
4916 if (chan->scid == L2CAP_CID_LE_DATA) { 5407 if (chan->scid == L2CAP_CID_LE_DATA) {
4917 if (!status && encrypt) { 5408 if (!status && encrypt) {
@@ -4981,6 +5472,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4981 rsp.status = cpu_to_le16(stat); 5472 rsp.status = cpu_to_le16(stat);
4982 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 5473 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4983 sizeof(rsp), &rsp); 5474 sizeof(rsp), &rsp);
5475
5476 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5477 res == L2CAP_CR_SUCCESS) {
5478 char buf[128];
5479 set_bit(CONF_REQ_SENT, &chan->conf_state);
5480 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5481 L2CAP_CONF_REQ,
5482 l2cap_build_conf_req(chan, buf),
5483 buf);
5484 chan->num_conf_req++;
5485 }
4984 } 5486 }
4985 5487
4986 l2cap_chan_unlock(chan); 5488 l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d48..a4bb27e8427e 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
27 27
28/* Bluetooth L2CAP sockets. */ 28/* Bluetooth L2CAP sockets. */
29 29
30#include <linux/security.h>
31#include <linux/export.h> 30#include <linux/export.h>
32 31
33#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
89 if (err < 0) 88 if (err < 0)
90 goto done; 89 goto done;
91 90
92 if (__le16_to_cpu(la.l2_psm) == 0x0001 || 91 if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
93 __le16_to_cpu(la.l2_psm) == 0x0003) 92 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
94 chan->sec_level = BT_SECURITY_SDP; 93 chan->sec_level = BT_SECURITY_SDP;
95 94
96 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 95 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
446 return err; 445 return err;
447} 446}
448 447
448static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
449{
450 switch (chan->scid) {
451 case L2CAP_CID_LE_DATA:
452 if (mtu < L2CAP_LE_MIN_MTU)
453 return false;
454 break;
455
456 default:
457 if (mtu < L2CAP_DEFAULT_MIN_MTU)
458 return false;
459 }
460
461 return true;
462}
463
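l2cap_valid_mtu() above enforces the spec minimums: 23 bytes for the LE data channel and 48 bytes for everything else, mirroring L2CAP_LE_MIN_MTU and L2CAP_DEFAULT_MIN_MTU. A userspace sketch of the same floor check (illustrative only, not part of this patch):

/* Illustrative sketch of the MTU floor check, not kernel code. */
#include <stdbool.h>
#include <assert.h>

#define LE_MIN_MTU      23u     /* mirrors L2CAP_LE_MIN_MTU */
#define BREDR_MIN_MTU   48u     /* mirrors L2CAP_DEFAULT_MIN_MTU */

static bool valid_mtu(bool is_le_channel, unsigned int mtu)
{
        return mtu >= (is_le_channel ? LE_MIN_MTU : BREDR_MIN_MTU);
}

int main(void)
{
        assert(valid_mtu(true, 23));    /* smallest legal LE MTU */
        assert(!valid_mtu(false, 23));  /* too small for BR/EDR */
        assert(valid_mtu(false, 672));  /* classic L2CAP default MTU */
        return 0;
}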
449static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 464static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
450{ 465{
451 struct sock *sk = sock->sk; 466 struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
484 break; 499 break;
485 } 500 }
486 501
502 if (!l2cap_valid_mtu(chan, opts.imtu)) {
503 err = -EINVAL;
504 break;
505 }
506
487 chan->mode = opts.mode; 507 chan->mode = opts.mode;
488 switch (chan->mode) { 508 switch (chan->mode) {
489 case L2CAP_MODE_BASIC: 509 case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
873 return err; 893 return err;
874} 894}
875 895
876static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) 896static void l2cap_sock_cleanup_listen(struct sock *parent)
877{ 897{
878 struct sock *sk, *parent = data; 898 struct sock *sk;
899
900 BT_DBG("parent %p", parent);
901
902 /* Close not yet accepted channels */
903 while ((sk = bt_accept_dequeue(parent, NULL))) {
904 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
905
906 l2cap_chan_lock(chan);
907 __clear_chan_timer(chan);
908 l2cap_chan_close(chan, ECONNRESET);
909 l2cap_chan_unlock(chan);
910
911 l2cap_sock_kill(sk);
912 }
913}
914
915static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
916{
917 struct sock *sk, *parent = chan->data;
918
919 /* Check for backlog size */
920 if (sk_acceptq_is_full(parent)) {
921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
922 return NULL;
923 }
879 924
880 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
881 GFP_ATOMIC); 926 GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
889 return l2cap_pi(sk)->chan; 934 return l2cap_pi(sk)->chan;
890} 935}
891 936
892static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) 937static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
893{ 938{
894 int err; 939 int err;
895 struct sock *sk = data; 940 struct sock *sk = chan->data;
896 struct l2cap_pinfo *pi = l2cap_pi(sk); 941 struct l2cap_pinfo *pi = l2cap_pi(sk);
897 942
898 lock_sock(sk); 943 lock_sock(sk);
@@ -925,16 +970,57 @@ done:
925 return err; 970 return err;
926} 971}
927 972
928static void l2cap_sock_close_cb(void *data) 973static void l2cap_sock_close_cb(struct l2cap_chan *chan)
929{ 974{
930 struct sock *sk = data; 975 struct sock *sk = chan->data;
931 976
932 l2cap_sock_kill(sk); 977 l2cap_sock_kill(sk);
933} 978}
934 979
935static void l2cap_sock_state_change_cb(void *data, int state) 980static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
936{ 981{
937 struct sock *sk = data; 982 struct sock *sk = chan->data;
983 struct sock *parent;
984
985 lock_sock(sk);
986
987 parent = bt_sk(sk)->parent;
988
989 sock_set_flag(sk, SOCK_ZAPPED);
990
991 switch (chan->state) {
992 case BT_OPEN:
993 case BT_BOUND:
994 case BT_CLOSED:
995 break;
996 case BT_LISTEN:
997 l2cap_sock_cleanup_listen(sk);
998 sk->sk_state = BT_CLOSED;
999 chan->state = BT_CLOSED;
1000
1001 break;
1002 default:
1003 sk->sk_state = BT_CLOSED;
1004 chan->state = BT_CLOSED;
1005
1006 sk->sk_err = err;
1007
1008 if (parent) {
1009 bt_accept_unlink(sk);
1010 parent->sk_data_ready(parent, 0);
1011 } else {
1012 sk->sk_state_change(sk);
1013 }
1014
1015 break;
1016 }
1017
1018 release_sock(sk);
1019}
1020
1021static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
1022{
1023 struct sock *sk = chan->data;
938 1024
939 sk->sk_state = state; 1025 sk->sk_state = state;
940} 1026}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
955 return skb; 1041 return skb;
956} 1042}
957 1043
1044static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1045{
1046 struct sock *sk = chan->data;
1047 struct sock *parent;
1048
1049 lock_sock(sk);
1050
1051 parent = bt_sk(sk)->parent;
1052
1053 BT_DBG("sk %p, parent %p", sk, parent);
1054
1055 sk->sk_state = BT_CONNECTED;
1056 sk->sk_state_change(sk);
1057
1058 if (parent)
1059 parent->sk_data_ready(parent, 0);
1060
1061 release_sock(sk);
1062}
1063
958static struct l2cap_ops l2cap_chan_ops = { 1064static struct l2cap_ops l2cap_chan_ops = {
959 .name = "L2CAP Socket Interface", 1065 .name = "L2CAP Socket Interface",
960 .new_connection = l2cap_sock_new_connection_cb, 1066 .new_connection = l2cap_sock_new_connection_cb,
961 .recv = l2cap_sock_recv_cb, 1067 .recv = l2cap_sock_recv_cb,
962 .close = l2cap_sock_close_cb, 1068 .close = l2cap_sock_close_cb,
1069 .teardown = l2cap_sock_teardown_cb,
963 .state_change = l2cap_sock_state_change_cb, 1070 .state_change = l2cap_sock_state_change_cb,
1071 .ready = l2cap_sock_ready_cb,
964 .alloc_skb = l2cap_sock_alloc_skb_cb, 1072 .alloc_skb = l2cap_sock_alloc_skb_cb,
965}; 1073};
966 1074
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f36..e1c97527e16c 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt 27#define pr_fmt(fmt) "Bluetooth: " fmt
28 28
29#include <linux/module.h> 29#include <linux/export.h>
30
31#include <linux/kernel.h>
32#include <linux/stddef.h>
33#include <linux/string.h>
34#include <asm/errno.h>
35 30
36#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
37 32
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5e3362ea00..ad6613d17ca6 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
24 24
25/* Bluetooth HCI Management interface */ 25/* Bluetooth HCI Management interface */
26 26
27#include <linux/kernel.h>
28#include <linux/uaccess.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <asm/unaligned.h> 28#include <asm/unaligned.h>
31 29
@@ -212,7 +210,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
212 210
213 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 211 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
214 212
215 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); 213 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
216 if (!skb) 214 if (!skb)
217 return -ENOMEM; 215 return -ENOMEM;
218 216
@@ -243,7 +241,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
243 241
244 BT_DBG("sock %p", sk); 242 BT_DBG("sock %p", sk);
245 243
246 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC); 244 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
247 if (!skb) 245 if (!skb)
248 return -ENOMEM; 246 return -ENOMEM;
249 247
@@ -689,14 +687,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
689{ 687{
690 struct pending_cmd *cmd; 688 struct pending_cmd *cmd;
691 689
692 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 690 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
693 if (!cmd) 691 if (!cmd)
694 return NULL; 692 return NULL;
695 693
696 cmd->opcode = opcode; 694 cmd->opcode = opcode;
697 cmd->index = hdev->id; 695 cmd->index = hdev->id;
698 696
699 cmd->param = kmalloc(len, GFP_ATOMIC); 697 cmd->param = kmalloc(len, GFP_KERNEL);
700 if (!cmd->param) { 698 if (!cmd->param) {
701 kfree(cmd); 699 kfree(cmd);
702 return NULL; 700 return NULL;
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
714} 712}
715 713
716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 714static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
717 void (*cb)(struct pending_cmd *cmd, void *data), 715 void (*cb)(struct pending_cmd *cmd,
716 void *data),
718 void *data) 717 void *data)
719{ 718{
720 struct list_head *p, *n; 719 struct list_head *p, *n;
@@ -813,7 +812,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
813 struct sk_buff *skb; 812 struct sk_buff *skb;
814 struct mgmt_hdr *hdr; 813 struct mgmt_hdr *hdr;
815 814
816 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 815 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
817 if (!skb) 816 if (!skb)
818 return -ENOMEM; 817 return -ENOMEM;
819 818
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
871 } 870 }
872 871
873 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 872 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
874 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 873 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
876 MGMT_STATUS_BUSY); 875 MGMT_STATUS_BUSY);
877 goto failed; 876 goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
978 } 977 }
979 978
980 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 979 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
981 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 980 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 981 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
983 MGMT_STATUS_BUSY); 982 MGMT_STATUS_BUSY);
984 goto failed; 983 goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1001 scan = 0; 1000 scan = 0;
1002 1001
1003 if (test_bit(HCI_ISCAN, &hdev->flags) && 1002 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1004 hdev->discov_timeout > 0) 1003 hdev->discov_timeout > 0)
1005 cancel_delayed_work(&hdev->discov_off); 1004 cancel_delayed_work(&hdev->discov_off);
1006 } 1005 }
1007 1006
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1056 bool changed = false; 1055 bool changed = false;
1057 1056
1058 if (!!cp->val != test_bit(HCI_LINK_SECURITY, 1057 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1059 &hdev->dev_flags)) { 1058 &hdev->dev_flags)) {
1060 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 1059 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1061 changed = true; 1060 changed = true;
1062 } 1061 }
@@ -1269,7 +1268,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1269 goto failed; 1268 goto failed;
1270 } 1269 }
1271 1270
1272 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 1271 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1273 if (!uuid) { 1272 if (!uuid) {
1274 err = -ENOMEM; 1273 err = -ENOMEM;
1275 goto failed; 1274 goto failed;
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
1317} 1316}
1318 1317
1319static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, 1318static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1320 u16 len) 1319 u16 len)
1321{ 1320{
1322 struct mgmt_cp_remove_uuid *cp = data; 1321 struct mgmt_cp_remove_uuid *cp = data;
1323 struct pending_cmd *cmd; 1322 struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
1442} 1441}
1443 1442
1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, 1443static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1445 u16 len) 1444 u16 len)
1446{ 1445{
1447 struct mgmt_cp_load_link_keys *cp = data; 1446 struct mgmt_cp_load_link_keys *cp = data;
1448 u16 key_count, expected_len; 1447 u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1454 sizeof(struct mgmt_link_key_info); 1453 sizeof(struct mgmt_link_key_info);
1455 if (expected_len != len) { 1454 if (expected_len != len) {
1456 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1455 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1457 len, expected_len); 1456 len, expected_len);
1458 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 1457 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1459 MGMT_STATUS_INVALID_PARAMS); 1458 MGMT_STATUS_INVALID_PARAMS);
1460 } 1459 }
1461 1460
1462 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1461 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1463 key_count); 1462 key_count);
1464 1463
1465 hci_dev_lock(hdev); 1464 hci_dev_lock(hdev);
1466 1465
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1535 if (cp->disconnect) { 1534 if (cp->disconnect) {
1536 if (cp->addr.type == BDADDR_BREDR) 1535 if (cp->addr.type == BDADDR_BREDR)
1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 1536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1538 &cp->addr.bdaddr); 1537 &cp->addr.bdaddr);
1539 else 1538 else
1540 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, 1539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1541 &cp->addr.bdaddr); 1540 &cp->addr.bdaddr);
1542 } else { 1541 } else {
1543 conn = NULL; 1542 conn = NULL;
1544 } 1543 }
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1594 } 1593 }
1595 1594
1596 if (cp->addr.type == BDADDR_BREDR) 1595 if (cp->addr.type == BDADDR_BREDR)
1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 1596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1597 &cp->addr.bdaddr);
1598 else 1598 else
1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1600 1600
@@ -1611,7 +1611,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1611 } 1611 }
1612 1612
1613 dc.handle = cpu_to_le16(conn->handle); 1613 dc.handle = cpu_to_le16(conn->handle);
1614 dc.reason = 0x13; /* Remote User Terminated Connection */ 1614 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1615 1615
1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1617 if (err < 0) 1617 if (err < 0)
@@ -1667,7 +1667,7 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1667 } 1667 }
1668 1668
1669 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 1669 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1670 rp = kmalloc(rp_len, GFP_ATOMIC); 1670 rp = kmalloc(rp_len, GFP_KERNEL);
1671 if (!rp) { 1671 if (!rp) {
1672 err = -ENOMEM; 1672 err = -ENOMEM;
1673 goto unlock; 1673 goto unlock;
@@ -1778,29 +1778,6 @@ failed:
1778 return err; 1778 return err;
1779} 1779}
1780 1780
1781static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1782 void *data, u16 len)
1783{
1784 struct mgmt_cp_pin_code_neg_reply *cp = data;
1785 int err;
1786
1787 BT_DBG("");
1788
1789 hci_dev_lock(hdev);
1790
1791 if (!hdev_is_powered(hdev)) {
1792 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
1793 MGMT_STATUS_NOT_POWERED);
1794 goto failed;
1795 }
1796
1797 err = send_pin_code_neg_reply(sk, hdev, cp);
1798
1799failed:
1800 hci_dev_unlock(hdev);
1801 return err;
1802}
1803
1804static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, 1781static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1805 u16 len) 1782 u16 len)
1806{ 1783{
@@ -1813,7 +1790,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1813 hdev->io_capability = cp->io_capability; 1790 hdev->io_capability = cp->io_capability;
1814 1791
1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1792 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1816 hdev->io_capability); 1793 hdev->io_capability);
1817 1794
1818 hci_dev_unlock(hdev); 1795 hci_dev_unlock(hdev);
1819 1796
@@ -1821,7 +1798,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1821 0); 1798 0);
1822} 1799}
1823 1800
1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1801static struct pending_cmd *find_pairing(struct hci_conn *conn)
1825{ 1802{
1826 struct hci_dev *hdev = conn->hdev; 1803 struct hci_dev *hdev = conn->hdev;
1827 struct pending_cmd *cmd; 1804 struct pending_cmd *cmd;
@@ -1927,8 +1904,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1927 rp.addr.type = cp->addr.type; 1904 rp.addr.type = cp->addr.type;
1928 1905
1929 if (IS_ERR(conn)) { 1906 if (IS_ERR(conn)) {
1907 int status;
1908
1909 if (PTR_ERR(conn) == -EBUSY)
1910 status = MGMT_STATUS_BUSY;
1911 else
1912 status = MGMT_STATUS_CONNECT_FAILED;
1913
1930 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 1914 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1931 MGMT_STATUS_CONNECT_FAILED, &rp, 1915 status, &rp,
1932 sizeof(rp)); 1916 sizeof(rp));
1933 goto unlock; 1917 goto unlock;
1934 } 1918 }
@@ -1959,7 +1943,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1959 cmd->user_data = conn; 1943 cmd->user_data = conn;
1960 1944
1961 if (conn->state == BT_CONNECTED && 1945 if (conn->state == BT_CONNECTED &&
1962 hci_conn_security(conn, sec_level, auth_type)) 1946 hci_conn_security(conn, sec_level, auth_type))
1963 pairing_complete(cmd, 0); 1947 pairing_complete(cmd, 0);
1964 1948
1965 err = 0; 1949 err = 0;
@@ -2076,6 +2060,18 @@ done:
2076 return err; 2060 return err;
2077} 2061}
2078 2062
2063static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2064 void *data, u16 len)
2065{
2066 struct mgmt_cp_pin_code_neg_reply *cp = data;
2067
2068 BT_DBG("");
2069
2070 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2071 MGMT_OP_PIN_CODE_NEG_REPLY,
2072 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2073}
2074
2079static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, 2075static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2080 u16 len) 2076 u16 len)
2081{ 2077{
@@ -2256,7 +2252,7 @@ unlock:
2256} 2252}
2257 2253
2258static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, 2254static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2259 void *data, u16 len) 2255 void *data, u16 len)
2260{ 2256{
2261 struct mgmt_cp_remove_remote_oob_data *cp = data; 2257 struct mgmt_cp_remove_remote_oob_data *cp = data;
2262 u8 status; 2258 u8 status;
@@ -2425,7 +2421,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2425 2421
2426 case DISCOVERY_RESOLVING: 2422 case DISCOVERY_RESOLVING:
2427 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 2423 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2428 NAME_PENDING); 2424 NAME_PENDING);
2429 if (!e) { 2425 if (!e) {
2430 mgmt_pending_remove(cmd); 2426 mgmt_pending_remove(cmd);
2431 err = cmd_complete(sk, hdev->id, 2427 err = cmd_complete(sk, hdev->id,
@@ -2600,8 +2596,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2600 if (cp->val) { 2596 if (cp->val) {
2601 type = PAGE_SCAN_TYPE_INTERLACED; 2597 type = PAGE_SCAN_TYPE_INTERLACED;
2602 2598
2603 /* 22.5 msec page scan interval */ 2599 /* 160 msec page scan interval */
2604 acp.interval = __constant_cpu_to_le16(0x0024); 2600 acp.interval = __constant_cpu_to_le16(0x0100);
2605 } else { 2601 } else {
2606 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 2602 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2607 2603
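The corrected comment above follows from the HCI unit: page scan intervals are given in 0.625 ms baseband slots, so the old value 0x0024 (36 slots) is 22.5 ms and the new 0x0100 (256 slots) is 160 ms. A quick illustrative check (not part of this patch):

/* Illustrative conversion of HCI page scan slots to milliseconds, not kernel code. */
#include <stdio.h>

static double slots_to_ms(unsigned int slots)
{
        return slots * 0.625;   /* one baseband slot is 0.625 ms */
}

int main(void)
{
        printf("0x0024 -> %.1f ms\n", slots_to_ms(0x0024));     /* 22.5 ms */
        printf("0x0100 -> %.1f ms\n", slots_to_ms(0x0100));     /* 160.0 ms */
        return 0;
}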
@@ -2647,7 +2643,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2647 sizeof(struct mgmt_ltk_info); 2643 sizeof(struct mgmt_ltk_info);
2648 if (expected_len != len) { 2644 if (expected_len != len) {
2649 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2645 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2650 len, expected_len); 2646 len, expected_len);
2651 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2647 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2652 EINVAL); 2648 EINVAL);
2653 } 2649 }
@@ -2772,7 +2768,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2772 } 2768 }
2773 2769
2774 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 2770 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2775 mgmt_handlers[opcode].func == NULL) { 2771 mgmt_handlers[opcode].func == NULL) {
2776 BT_DBG("Unknown op %u", opcode); 2772 BT_DBG("Unknown op %u", opcode);
2777 err = cmd_status(sk, index, opcode, 2773 err = cmd_status(sk, index, opcode,
2778 MGMT_STATUS_UNKNOWN_COMMAND); 2774 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2780,7 +2776,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2780 } 2776 }
2781 2777
2782 if ((hdev && opcode < MGMT_OP_READ_INFO) || 2778 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2783 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 2779 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2784 err = cmd_status(sk, index, opcode, 2780 err = cmd_status(sk, index, opcode,
2785 MGMT_STATUS_INVALID_INDEX); 2781 MGMT_STATUS_INVALID_INDEX);
2786 goto done; 2782 goto done;
@@ -2789,7 +2785,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2789 handler = &mgmt_handlers[opcode]; 2785 handler = &mgmt_handlers[opcode];
2790 2786
2791 if ((handler->var_len && len < handler->data_len) || 2787 if ((handler->var_len && len < handler->data_len) ||
2792 (!handler->var_len && len != handler->data_len)) { 2788 (!handler->var_len && len != handler->data_len)) {
2793 err = cmd_status(sk, index, opcode, 2789 err = cmd_status(sk, index, opcode,
2794 MGMT_STATUS_INVALID_PARAMS); 2790 MGMT_STATUS_INVALID_PARAMS);
2795 goto done; 2791 goto done;
@@ -2973,7 +2969,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2973 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2969 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2974 ev.key.addr.type = BDADDR_BREDR; 2970 ev.key.addr.type = BDADDR_BREDR;
2975 ev.key.type = key->type; 2971 ev.key.type = key->type;
2976 memcpy(ev.key.val, key->val, 16); 2972 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
2977 ev.key.pin_len = key->pin_len; 2973 ev.key.pin_len = key->pin_len;
2978 2974
2979 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2975 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3108,7 +3104,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3108 mgmt_pending_remove(cmd); 3104 mgmt_pending_remove(cmd);
3109 3105
3110 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 3106 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3111 hdev); 3107 hdev);
3112 return err; 3108 return err;
3113} 3109}
3114 3110
@@ -3198,7 +3194,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3198} 3194}
3199 3195
3200int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3196int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3201 u8 link_type, u8 addr_type) 3197 u8 link_type, u8 addr_type)
3202{ 3198{
3203 struct mgmt_ev_user_passkey_request ev; 3199 struct mgmt_ev_user_passkey_request ev;
3204 3200
@@ -3212,8 +3208,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3212} 3208}
3213 3209
3214static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3210static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3215 u8 link_type, u8 addr_type, u8 status, 3211 u8 link_type, u8 addr_type, u8 status,
3216 u8 opcode) 3212 u8 opcode)
3217{ 3213{
3218 struct pending_cmd *cmd; 3214 struct pending_cmd *cmd;
3219 struct mgmt_rp_user_confirm_reply rp; 3215 struct mgmt_rp_user_confirm_reply rp;
@@ -3244,7 +3240,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3244 u8 link_type, u8 addr_type, u8 status) 3240 u8 link_type, u8 addr_type, u8 status)
3245{ 3241{
3246 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3247 status, MGMT_OP_USER_CONFIRM_NEG_REPLY); 3243 status,
3244 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3248} 3245}
3249 3246
3250int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3247int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3258,7 +3255,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258 u8 link_type, u8 addr_type, u8 status) 3255 u8 link_type, u8 addr_type, u8 status)
3259{ 3256{
3260 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3257 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3261 status, MGMT_OP_USER_PASSKEY_NEG_REPLY); 3258 status,
3259 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3262} 3260}
3263 3261
3264int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3262int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -3537,9 +3535,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3537 ev->addr.type = link_to_bdaddr(link_type, addr_type); 3535 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3538 ev->rssi = rssi; 3536 ev->rssi = rssi;
3539 if (cfm_name) 3537 if (cfm_name)
3540 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME; 3538 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3541 if (!ssp) 3539 if (!ssp)
3542 ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING; 3540 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3543 3541
3544 if (eir_len > 0) 3542 if (eir_len > 0)
3545 memcpy(ev->eir, eir, eir_len); 3543 memcpy(ev->eir, eir, eir_len);
@@ -3549,7 +3547,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3549 dev_class, 3); 3547 dev_class, 3);
3550 3548
3551 ev->eir_len = cpu_to_le16(eir_len); 3549 ev->eir_len = cpu_to_le16(eir_len);
3552
3553 ev_size = sizeof(*ev) + eir_len; 3550 ev_size = sizeof(*ev) + eir_len;
3554 3551
3555 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); 3552 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
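In the mgmt_device_found() hunk above, ev->flags stops being poked as a byte array and is treated as one 32-bit little-endian field, with cpu_to_le32() keeping the wire format stable on big-endian hosts. A standalone sketch of the byte-order concern (put_le32 and the DEV_FOUND_* bit values are assumptions for illustration, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define DEV_FOUND_CONFIRM_NAME   0x00000001u	/* assumed bit values */
#define DEV_FOUND_LEGACY_PAIRING 0x00000002u

/* Store a 32-bit value in little-endian order regardless of host endianness. */
static void put_le32(uint8_t *dst, uint32_t val)
{
	dst[0] = val & 0xff;
	dst[1] = (val >> 8) & 0xff;
	dst[2] = (val >> 16) & 0xff;
	dst[3] = (val >> 24) & 0xff;
}

int main(void)
{
	uint32_t flags = DEV_FOUND_CONFIRM_NAME | DEV_FOUND_LEGACY_PAIRING;
	uint8_t wire[4];

	put_le32(wire, flags);		/* same byte order on any host */
	printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
	return 0;
}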
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e7..c75107ef8920 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/device.h>
36#include <linux/debugfs.h> 29#include <linux/debugfs.h>
37#include <linux/seq_file.h>
38#include <linux/net.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 30#include <linux/kthread.h>
41#include <linux/slab.h>
42
43#include <net/sock.h>
44#include <linux/uaccess.h>
45#include <asm/unaligned.h> 31#include <asm/unaligned.h>
46 32
47#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
115#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 101#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
116#define __get_rpn_parity(line) (((line) >> 3) & 0x7) 102#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
117 103
118static inline void rfcomm_schedule(void) 104static void rfcomm_schedule(void)
119{ 105{
120 if (!rfcomm_thread) 106 if (!rfcomm_thread)
121 return; 107 return;
122 wake_up_process(rfcomm_thread); 108 wake_up_process(rfcomm_thread);
123} 109}
124 110
125static inline void rfcomm_session_put(struct rfcomm_session *s) 111static void rfcomm_session_put(struct rfcomm_session *s)
126{ 112{
127 if (atomic_dec_and_test(&s->refcnt)) 113 if (atomic_dec_and_test(&s->refcnt))
128 rfcomm_session_del(s); 114 rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
227 return err; 213 return err;
228} 214}
229 215
230static inline int rfcomm_check_security(struct rfcomm_dlc *d) 216static int rfcomm_check_security(struct rfcomm_dlc *d)
231{ 217{
232 struct sock *sk = d->session->sock->sk; 218 struct sock *sk = d->session->sock->sk;
233 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 219 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
1750/* Send data queued for the DLC. 1736/* Send data queued for the DLC.
1751 * Return number of frames left in the queue. 1737 * Return number of frames left in the queue.
1752 */ 1738 */
1753static inline int rfcomm_process_tx(struct rfcomm_dlc *d) 1739static int rfcomm_process_tx(struct rfcomm_dlc *d)
1754{ 1740{
1755 struct sk_buff *skb; 1741 struct sk_buff *skb;
1756 int err; 1742 int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
1798 return skb_queue_len(&d->tx_queue); 1784 return skb_queue_len(&d->tx_queue);
1799} 1785}
1800 1786
1801static inline void rfcomm_process_dlcs(struct rfcomm_session *s) 1787static void rfcomm_process_dlcs(struct rfcomm_session *s)
1802{ 1788{
1803 struct rfcomm_dlc *d; 1789 struct rfcomm_dlc *d;
1804 struct list_head *p, *n; 1790 struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1858 } 1844 }
1859} 1845}
1860 1846
1861static inline void rfcomm_process_rx(struct rfcomm_session *s) 1847static void rfcomm_process_rx(struct rfcomm_session *s)
1862{ 1848{
1863 struct socket *sock = s->sock; 1849 struct socket *sock = s->sock;
1864 struct sock *sk = sock->sk; 1850 struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1883 } 1869 }
1884} 1870}
1885 1871
1886static inline void rfcomm_accept_connection(struct rfcomm_session *s) 1872static void rfcomm_accept_connection(struct rfcomm_session *s)
1887{ 1873{
1888 struct socket *sock = s->sock, *nsock; 1874 struct socket *sock = s->sock, *nsock;
1889 int err; 1875 int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1917 sock_release(nsock); 1903 sock_release(nsock);
1918} 1904}
1919 1905
1920static inline void rfcomm_check_connection(struct rfcomm_session *s) 1906static void rfcomm_check_connection(struct rfcomm_session *s)
1921{ 1907{
1922 struct sock *sk = s->sock->sk; 1908 struct sock *sk = s->sock->sk;
1923 1909
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1941 } 1927 }
1942} 1928}
1943 1929
1944static inline void rfcomm_process_sessions(void) 1930static void rfcomm_process_sessions(void)
1945{ 1931{
1946 struct list_head *p, *n; 1932 struct list_head *p, *n;
1947 1933
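The rfcomm/core.c changes above are mechanical: unused includes are dropped and the 'inline' hint is removed from file-local helpers, leaving inlining decisions to the compiler without changing behaviour. One helper worth a second look is rfcomm_session_put(), which follows the usual drop-reference-and-free-on-zero pattern; a standalone sketch using C11 atomics as a stand-in for the kernel's atomic_dec_and_test():

#include <stdatomic.h>
#include <stdlib.h>

struct session {
	atomic_int refcnt;
	/* ... payload ... */
};

static void session_put(struct session *s)
{
	/* atomic_fetch_sub returns the old value, so 1 means this was the last reference */
	if (atomic_fetch_sub(&s->refcnt, 1) == 1)
		free(s);	/* stands in for rfcomm_session_del() */
}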
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb864..7e1e59645c05 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29
30#include <linux/types.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/debugfs.h> 29#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45#include <linux/security.h>
46#include <net/sock.h>
47
48#include <linux/uaccess.h>
49 30
50#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14aee..cb960773c002 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
31#include <linux/tty_driver.h> 31#include <linux/tty_driver.h>
32#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
33 33
34#include <linux/capability.h>
35#include <linux/slab.h>
36#include <linux/skbuff.h>
37#include <linux/workqueue.h>
38
39#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
41#include <net/bluetooth/rfcomm.h> 36#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
132 return NULL; 127 return NULL;
133} 128}
134 129
135static inline struct rfcomm_dev *rfcomm_dev_get(int id) 130static struct rfcomm_dev *rfcomm_dev_get(int id)
136{ 131{
137 struct rfcomm_dev *dev; 132 struct rfcomm_dev *dev;
138 133
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
345 tty_port_put(&dev->port); 340 tty_port_put(&dev->port);
346} 341}
347 342
348static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) 343static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
349{ 344{
350 tty_port_get(&dev->port); 345 tty_port_get(&dev->port);
351 atomic_add(skb->truesize, &dev->wmem_alloc); 346 atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a7..40bbe25dcff7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
25/* Bluetooth SCO sockets. */ 25/* Bluetooth SCO sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <linux/socket.h>
39#include <linux/skbuff.h>
40#include <linux/device.h>
41#include <linux/debugfs.h> 28#include <linux/debugfs.h>
42#include <linux/seq_file.h> 29#include <linux/seq_file.h>
43#include <linux/list.h>
44#include <linux/security.h>
45#include <net/sock.h>
46
47#include <linux/uaccess.h>
48 30
49#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
123 return conn; 105 return conn;
124} 106}
125 107
126static inline struct sock *sco_chan_get(struct sco_conn *conn) 108static struct sock *sco_chan_get(struct sco_conn *conn)
127{ 109{
128 struct sock *sk = NULL; 110 struct sock *sk = NULL;
129 sco_conn_lock(conn); 111 sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
157 return 0; 139 return 0;
158} 140}
159 141
160static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) 142static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
143 struct sock *parent)
161{ 144{
162 int err = 0; 145 int err = 0;
163 146
@@ -228,7 +211,7 @@ done:
228 return err; 211 return err;
229} 212}
230 213
231static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) 214static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
232{ 215{
233 struct sco_conn *conn = sco_pi(sk)->conn; 216 struct sco_conn *conn = sco_pi(sk)->conn;
234 struct sk_buff *skb; 217 struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
254 return len; 237 return len;
255} 238}
256 239
257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 240static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
258{ 241{
259 struct sock *sk = sco_chan_get(conn); 242 struct sock *sk = sco_chan_get(conn);
260 243
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
523 goto done; 506 goto done;
524 507
525 err = bt_sock_wait_state(sk, BT_CONNECTED, 508 err = bt_sock_wait_state(sk, BT_CONNECTED,
526 sock_sndtimeo(sk, flags & O_NONBLOCK)); 509 sock_sndtimeo(sk, flags & O_NONBLOCK));
527 510
528done: 511done:
529 release_sock(sk); 512 release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
788 771
789 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 772 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
790 err = bt_sock_wait_state(sk, BT_CLOSED, 773 err = bt_sock_wait_state(sk, BT_CLOSED,
791 sk->sk_lingertime); 774 sk->sk_lingertime);
792 } 775 }
793 release_sock(sk); 776 release_sock(sk);
794 return err; 777 return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
878 bh_lock_sock(parent); 861 bh_lock_sock(parent);
879 862
880 sk = sco_sock_alloc(sock_net(parent), NULL, 863 sk = sco_sock_alloc(sock_net(parent), NULL,
881 BTPROTO_SCO, GFP_ATOMIC); 864 BTPROTO_SCO, GFP_ATOMIC);
882 if (!sk) { 865 if (!sk) {
883 bh_unlock_sock(parent); 866 bh_unlock_sock(parent);
884 goto done; 867 goto done;
@@ -907,7 +890,7 @@ done:
907/* ----- SCO interface with lower layer (HCI) ----- */ 890/* ----- SCO interface with lower layer (HCI) ----- */
908int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 891int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
909{ 892{
910 register struct sock *sk; 893 struct sock *sk;
911 struct hlist_node *node; 894 struct hlist_node *node;
912 int lm = 0; 895 int lm = 0;
913 896
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
920 continue; 903 continue;
921 904
922 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || 905 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
923 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 906 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
924 lm |= HCI_LM_ACCEPT; 907 lm |= HCI_LM_ACCEPT;
925 break; 908 break;
926 } 909 }
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
981 964
982 sk_for_each(sk, node, &sco_sk_list.head) { 965 sk_for_each(sk, node, &sco_sk_list.head) {
983 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), 966 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
984 batostr(&bt_sk(sk)->dst), sk->sk_state); 967 batostr(&bt_sk(sk)->dst), sk->sk_state);
985 } 968 }
986 969
987 read_unlock(&sco_sk_list.lock); 970 read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
1044 } 1027 }
1045 1028
1046 if (bt_debugfs) { 1029 if (bt_debugfs) {
1047 sco_debugfs = debugfs_create_file("sco", 0444, 1030 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1048 bt_debugfs, NULL, &sco_debugfs_fops); 1031 NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs) 1032 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file"); 1033 BT_ERR("Failed to create SCO debug file");
1051 } 1034 }
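The sco.c hunks above likewise drop dead includes, the obsolete 'register' qualifier and 'inline' hints, and only re-wrap the debugfs_create_file() call. For context, a read-only debugfs entry such as "sco" is normally backed by seq_file; a hedged sketch of that wiring (the example_* names are placeholders, while debugfs_create_file(), single_open(), seq_read(), seq_lseek() and single_release() are real kernel APIs):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *f, void *p)
{
	seq_printf(f, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* usage: dentry = debugfs_create_file("sco", 0444, parent_dir, NULL, &example_fops); */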
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 37df4e9b3896..16ef0dc85a0a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/crypto.h>
24#include <linux/scatterlist.h>
25#include <crypto/b128ops.h>
26
23#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h> 28#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h> 29#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/mgmt.h> 30#include <net/bluetooth/mgmt.h>
27#include <net/bluetooth/smp.h> 31#include <net/bluetooth/smp.h>
28#include <linux/crypto.h>
29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h>
31 32
32#define SMP_TIMEOUT msecs_to_jiffies(30000) 33#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 34