Diffstat (limited to 'net')
37 files changed, 3089 insertions, 1716 deletions
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 2dc5a5700f53..fa6d94a4602a 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile | |||
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/ | |||
9 | obj-$(CONFIG_BT_HIDP) += hidp/ | 9 | obj-$(CONFIG_BT_HIDP) += hidp/ |
10 | 10 | ||
11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ | 11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ |
12 | hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o | 12 | hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ |
13 | a2mp.o | ||
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c new file mode 100644 index 000000000000..fb93250b3938 --- /dev/null +++ b/net/bluetooth/a2mp.c | |||
@@ -0,0 +1,568 @@ | |||
1 | /* | ||
2 | Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. | ||
3 | Copyright (c) 2011,2012 Intel Corp. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License version 2 and | ||
7 | only version 2 as published by the Free Software Foundation. | ||
8 | |||
9 | This program is distributed in the hope that it will be useful, | ||
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <net/bluetooth/bluetooth.h> | ||
16 | #include <net/bluetooth/hci_core.h> | ||
17 | #include <net/bluetooth/l2cap.h> | ||
18 | #include <net/bluetooth/a2mp.h> | ||
19 | |||
20 | /* A2MP build & send command helper functions */ | ||
21 | static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) | ||
22 | { | ||
23 | struct a2mp_cmd *cmd; | ||
24 | int plen; | ||
25 | |||
26 | plen = sizeof(*cmd) + len; | ||
27 | cmd = kzalloc(plen, GFP_KERNEL); | ||
28 | if (!cmd) | ||
29 | return NULL; | ||
30 | |||
31 | cmd->code = code; | ||
32 | cmd->ident = ident; | ||
33 | cmd->len = cpu_to_le16(len); | ||
34 | |||
35 | memcpy(cmd->data, data, len); | ||
36 | |||
37 | return cmd; | ||
38 | } | ||
39 | |||
40 | static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, | ||
41 | void *data) | ||
42 | { | ||
43 | struct l2cap_chan *chan = mgr->a2mp_chan; | ||
44 | struct a2mp_cmd *cmd; | ||
45 | u16 total_len = len + sizeof(*cmd); | ||
46 | struct kvec iv; | ||
47 | struct msghdr msg; | ||
48 | |||
49 | cmd = __a2mp_build(code, ident, len, data); | ||
50 | if (!cmd) | ||
51 | return; | ||
52 | |||
53 | iv.iov_base = cmd; | ||
54 | iv.iov_len = total_len; | ||
55 | |||
56 | memset(&msg, 0, sizeof(msg)); | ||
57 | |||
58 | msg.msg_iov = (struct iovec *) &iv; | ||
59 | msg.msg_iovlen = 1; | ||
60 | |||
61 | l2cap_chan_send(chan, &msg, total_len, 0); | ||
62 | |||
63 | kfree(cmd); | ||
64 | } | ||
65 | |||
66 | static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) | ||
67 | { | ||
68 | cl->id = 0; | ||
69 | cl->type = 0; | ||
70 | cl->status = 1; | ||
71 | } | ||
72 | |||
73 | /* hci_dev_list shall be locked */ | ||
74 | static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl) | ||
75 | { | ||
76 | int i = 0; | ||
77 | struct hci_dev *hdev; | ||
78 | |||
79 | __a2mp_cl_bredr(cl); | ||
80 | |||
81 | list_for_each_entry(hdev, &hci_dev_list, list) { | ||
82 | /* Iterate through AMP controllers */ | ||
83 | if (hdev->id == HCI_BREDR_ID) | ||
84 | continue; | ||
85 | |||
86 | /* Starting from second entry */ | ||
87 | if (++i >= num_ctrl) | ||
88 | return; | ||
89 | |||
90 | cl[i].id = hdev->id; | ||
91 | cl[i].type = hdev->amp_type; | ||
92 | cl[i].status = hdev->amp_status; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /* Processing A2MP messages */ | ||
97 | static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, | ||
98 | struct a2mp_cmd *hdr) | ||
99 | { | ||
100 | struct a2mp_cmd_rej *rej = (void *) skb->data; | ||
101 | |||
102 | if (le16_to_cpu(hdr->len) < sizeof(*rej)) | ||
103 | return -EINVAL; | ||
104 | |||
105 | BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason)); | ||
106 | |||
107 | skb_pull(skb, sizeof(*rej)); | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, | ||
113 | struct a2mp_cmd *hdr) | ||
114 | { | ||
115 | struct a2mp_discov_req *req = (void *) skb->data; | ||
116 | u16 len = le16_to_cpu(hdr->len); | ||
117 | struct a2mp_discov_rsp *rsp; | ||
118 | u16 ext_feat; | ||
119 | u8 num_ctrl; | ||
120 | |||
121 | if (len < sizeof(*req)) | ||
122 | return -EINVAL; | ||
123 | |||
124 | skb_pull(skb, sizeof(*req)); | ||
125 | |||
126 | ext_feat = le16_to_cpu(req->ext_feat); | ||
127 | |||
128 | BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); | ||
129 | |||
130 | /* check that packet is not broken for now */ | ||
131 | while (ext_feat & A2MP_FEAT_EXT) { | ||
132 | if (len < sizeof(ext_feat)) | ||
133 | return -EINVAL; | ||
134 | |||
135 | ext_feat = get_unaligned_le16(skb->data); | ||
136 | BT_DBG("efm 0x%4.4x", ext_feat); | ||
137 | len -= sizeof(ext_feat); | ||
138 | skb_pull(skb, sizeof(ext_feat)); | ||
139 | } | ||
140 | |||
141 | read_lock(&hci_dev_list_lock); | ||
142 | |||
143 | num_ctrl = __hci_num_ctrl(); | ||
144 | len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp); | ||
145 | rsp = kmalloc(len, GFP_ATOMIC); | ||
146 | if (!rsp) { | ||
147 | read_unlock(&hci_dev_list_lock); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | |||
151 | rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); | ||
152 | rsp->ext_feat = 0; | ||
153 | |||
154 | __a2mp_add_cl(mgr, rsp->cl, num_ctrl); | ||
155 | |||
156 | read_unlock(&hci_dev_list_lock); | ||
157 | |||
158 | a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); | ||
159 | |||
160 | kfree(rsp); | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, | ||
165 | struct a2mp_cmd *hdr) | ||
166 | { | ||
167 | struct a2mp_cl *cl = (void *) skb->data; | ||
168 | |||
169 | while (skb->len >= sizeof(*cl)) { | ||
170 | BT_DBG("Controller id %d type %d status %d", cl->id, cl->type, | ||
171 | cl->status); | ||
172 | cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl)); | ||
173 | } | ||
174 | |||
175 | /* TODO send A2MP_CHANGE_RSP */ | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, | ||
181 | struct a2mp_cmd *hdr) | ||
182 | { | ||
183 | struct a2mp_info_req *req = (void *) skb->data; | ||
184 | struct a2mp_info_rsp rsp; | ||
185 | struct hci_dev *hdev; | ||
186 | |||
187 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | ||
188 | return -EINVAL; | ||
189 | |||
190 | BT_DBG("id %d", req->id); | ||
191 | |||
192 | rsp.id = req->id; | ||
193 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
194 | |||
195 | hdev = hci_dev_get(req->id); | ||
196 | if (hdev && hdev->amp_type != HCI_BREDR) { | ||
197 | rsp.status = 0; | ||
198 | rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); | ||
199 | rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); | ||
200 | rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); | ||
201 | rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); | ||
202 | rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); | ||
203 | } | ||
204 | |||
205 | if (hdev) | ||
206 | hci_dev_put(hdev); | ||
207 | |||
208 | a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp); | ||
209 | |||
210 | skb_pull(skb, sizeof(*req)); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, | ||
215 | struct a2mp_cmd *hdr) | ||
216 | { | ||
217 | struct a2mp_amp_assoc_req *req = (void *) skb->data; | ||
218 | struct hci_dev *hdev; | ||
219 | |||
220 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | ||
221 | return -EINVAL; | ||
222 | |||
223 | BT_DBG("id %d", req->id); | ||
224 | |||
225 | hdev = hci_dev_get(req->id); | ||
226 | if (!hdev || hdev->amp_type == HCI_BREDR) { | ||
227 | struct a2mp_amp_assoc_rsp rsp; | ||
228 | rsp.id = req->id; | ||
229 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
230 | |||
231 | a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), | ||
232 | &rsp); | ||
233 | goto clean; | ||
234 | } | ||
235 | |||
236 | /* Placeholder for HCI Read AMP Assoc */ | ||
237 | |||
238 | clean: | ||
239 | if (hdev) | ||
240 | hci_dev_put(hdev); | ||
241 | |||
242 | skb_pull(skb, sizeof(*req)); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | ||
247 | struct a2mp_cmd *hdr) | ||
248 | { | ||
249 | struct a2mp_physlink_req *req = (void *) skb->data; | ||
250 | |||
251 | struct a2mp_physlink_rsp rsp; | ||
252 | struct hci_dev *hdev; | ||
253 | |||
254 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | ||
255 | return -EINVAL; | ||
256 | |||
257 | BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); | ||
258 | |||
259 | rsp.local_id = req->remote_id; | ||
260 | rsp.remote_id = req->local_id; | ||
261 | |||
262 | hdev = hci_dev_get(req->remote_id); | ||
263 | if (!hdev || hdev->amp_type != HCI_AMP) { | ||
264 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
265 | goto send_rsp; | ||
266 | } | ||
267 | |||
268 | /* TODO process physlink create */ | ||
269 | |||
270 | rsp.status = A2MP_STATUS_SUCCESS; | ||
271 | |||
272 | send_rsp: | ||
273 | if (hdev) | ||
274 | hci_dev_put(hdev); | ||
275 | |||
276 | a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp), | ||
277 | &rsp); | ||
278 | |||
279 | skb_pull(skb, le16_to_cpu(hdr->len)); | ||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | ||
284 | struct a2mp_cmd *hdr) | ||
285 | { | ||
286 | struct a2mp_physlink_req *req = (void *) skb->data; | ||
287 | struct a2mp_physlink_rsp rsp; | ||
288 | struct hci_dev *hdev; | ||
289 | |||
290 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); | ||
294 | |||
295 | rsp.local_id = req->remote_id; | ||
296 | rsp.remote_id = req->local_id; | ||
297 | rsp.status = A2MP_STATUS_SUCCESS; | ||
298 | |||
299 | hdev = hci_dev_get(req->local_id); | ||
300 | if (!hdev) { | ||
301 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
302 | goto send_rsp; | ||
303 | } | ||
304 | |||
305 | /* TODO Disconnect Phys Link here */ | ||
306 | |||
307 | hci_dev_put(hdev); | ||
308 | |||
309 | send_rsp: | ||
310 | a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); | ||
311 | |||
312 | skb_pull(skb, sizeof(*req)); | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, | ||
317 | struct a2mp_cmd *hdr) | ||
318 | { | ||
319 | BT_DBG("ident %d code %d", hdr->ident, hdr->code); | ||
320 | |||
321 | skb_pull(skb, le16_to_cpu(hdr->len)); | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | /* Handle A2MP signalling */ | ||
326 | static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) | ||
327 | { | ||
328 | struct a2mp_cmd *hdr = (void *) skb->data; | ||
329 | struct amp_mgr *mgr = chan->data; | ||
330 | int err = 0; | ||
331 | |||
332 | amp_mgr_get(mgr); | ||
333 | |||
334 | while (skb->len >= sizeof(*hdr)) { | ||
335 | struct a2mp_cmd *hdr = (void *) skb->data; | ||
336 | u16 len = le16_to_cpu(hdr->len); | ||
337 | |||
338 | BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len); | ||
339 | |||
340 | skb_pull(skb, sizeof(*hdr)); | ||
341 | |||
342 | if (len > skb->len || !hdr->ident) { | ||
343 | err = -EINVAL; | ||
344 | break; | ||
345 | } | ||
346 | |||
347 | mgr->ident = hdr->ident; | ||
348 | |||
349 | switch (hdr->code) { | ||
350 | case A2MP_COMMAND_REJ: | ||
351 | a2mp_command_rej(mgr, skb, hdr); | ||
352 | break; | ||
353 | |||
354 | case A2MP_DISCOVER_REQ: | ||
355 | err = a2mp_discover_req(mgr, skb, hdr); | ||
356 | break; | ||
357 | |||
358 | case A2MP_CHANGE_NOTIFY: | ||
359 | err = a2mp_change_notify(mgr, skb, hdr); | ||
360 | break; | ||
361 | |||
362 | case A2MP_GETINFO_REQ: | ||
363 | err = a2mp_getinfo_req(mgr, skb, hdr); | ||
364 | break; | ||
365 | |||
366 | case A2MP_GETAMPASSOC_REQ: | ||
367 | err = a2mp_getampassoc_req(mgr, skb, hdr); | ||
368 | break; | ||
369 | |||
370 | case A2MP_CREATEPHYSLINK_REQ: | ||
371 | err = a2mp_createphyslink_req(mgr, skb, hdr); | ||
372 | break; | ||
373 | |||
374 | case A2MP_DISCONNPHYSLINK_REQ: | ||
375 | err = a2mp_discphyslink_req(mgr, skb, hdr); | ||
376 | break; | ||
377 | |||
378 | case A2MP_CHANGE_RSP: | ||
379 | case A2MP_DISCOVER_RSP: | ||
380 | case A2MP_GETINFO_RSP: | ||
381 | case A2MP_GETAMPASSOC_RSP: | ||
382 | case A2MP_CREATEPHYSLINK_RSP: | ||
383 | case A2MP_DISCONNPHYSLINK_RSP: | ||
384 | err = a2mp_cmd_rsp(mgr, skb, hdr); | ||
385 | break; | ||
386 | |||
387 | default: | ||
388 | BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); | ||
389 | err = -EINVAL; | ||
390 | break; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | if (err) { | ||
395 | struct a2mp_cmd_rej rej; | ||
396 | rej.reason = __constant_cpu_to_le16(0); | ||
397 | |||
398 | BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); | ||
399 | |||
400 | a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), | ||
401 | &rej); | ||
402 | } | ||
403 | |||
404 | /* Always free skb and return success error code to prevent | ||
405 | from sending L2CAP Disconnect over A2MP channel */ | ||
406 | kfree_skb(skb); | ||
407 | |||
408 | amp_mgr_put(mgr); | ||
409 | |||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | static void a2mp_chan_close_cb(struct l2cap_chan *chan) | ||
414 | { | ||
415 | l2cap_chan_destroy(chan); | ||
416 | } | ||
417 | |||
418 | static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) | ||
419 | { | ||
420 | struct amp_mgr *mgr = chan->data; | ||
421 | |||
422 | if (!mgr) | ||
423 | return; | ||
424 | |||
425 | BT_DBG("chan %p state %s", chan, state_to_string(state)); | ||
426 | |||
427 | chan->state = state; | ||
428 | |||
429 | switch (state) { | ||
430 | case BT_CLOSED: | ||
431 | if (mgr) | ||
432 | amp_mgr_put(mgr); | ||
433 | break; | ||
434 | } | ||
435 | } | ||
436 | |||
437 | static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, | ||
438 | unsigned long len, int nb) | ||
439 | { | ||
440 | return bt_skb_alloc(len, GFP_KERNEL); | ||
441 | } | ||
442 | |||
443 | static struct l2cap_ops a2mp_chan_ops = { | ||
444 | .name = "L2CAP A2MP channel", | ||
445 | .recv = a2mp_chan_recv_cb, | ||
446 | .close = a2mp_chan_close_cb, | ||
447 | .state_change = a2mp_chan_state_change_cb, | ||
448 | .alloc_skb = a2mp_chan_alloc_skb_cb, | ||
449 | |||
450 | /* Not implemented for A2MP */ | ||
451 | .new_connection = l2cap_chan_no_new_connection, | ||
452 | .teardown = l2cap_chan_no_teardown, | ||
453 | .ready = l2cap_chan_no_ready, | ||
454 | }; | ||
455 | |||
456 | static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) | ||
457 | { | ||
458 | struct l2cap_chan *chan; | ||
459 | int err; | ||
460 | |||
461 | chan = l2cap_chan_create(); | ||
462 | if (!chan) | ||
463 | return NULL; | ||
464 | |||
465 | BT_DBG("chan %p", chan); | ||
466 | |||
467 | chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP; | ||
468 | chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; | ||
469 | |||
470 | chan->ops = &a2mp_chan_ops; | ||
471 | |||
472 | l2cap_chan_set_defaults(chan); | ||
473 | chan->remote_max_tx = chan->max_tx; | ||
474 | chan->remote_tx_win = chan->tx_win; | ||
475 | |||
476 | chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; | ||
477 | chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; | ||
478 | |||
479 | skb_queue_head_init(&chan->tx_q); | ||
480 | |||
481 | chan->mode = L2CAP_MODE_ERTM; | ||
482 | |||
483 | err = l2cap_ertm_init(chan); | ||
484 | if (err < 0) { | ||
485 | l2cap_chan_del(chan, 0); | ||
486 | return NULL; | ||
487 | } | ||
488 | |||
489 | chan->conf_state = 0; | ||
490 | |||
491 | l2cap_chan_add(conn, chan); | ||
492 | |||
493 | chan->remote_mps = chan->omtu; | ||
494 | chan->mps = chan->omtu; | ||
495 | |||
496 | chan->state = BT_CONNECTED; | ||
497 | |||
498 | return chan; | ||
499 | } | ||
500 | |||
501 | /* AMP Manager functions */ | ||
502 | void amp_mgr_get(struct amp_mgr *mgr) | ||
503 | { | ||
504 | BT_DBG("mgr %p", mgr); | ||
505 | |||
506 | kref_get(&mgr->kref); | ||
507 | } | ||
508 | |||
509 | static void amp_mgr_destroy(struct kref *kref) | ||
510 | { | ||
511 | struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref); | ||
512 | |||
513 | BT_DBG("mgr %p", mgr); | ||
514 | |||
515 | kfree(mgr); | ||
516 | } | ||
517 | |||
518 | int amp_mgr_put(struct amp_mgr *mgr) | ||
519 | { | ||
520 | BT_DBG("mgr %p", mgr); | ||
521 | |||
522 | return kref_put(&mgr->kref, &amp_mgr_destroy); | ||
523 | } | ||
524 | |||
525 | static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) | ||
526 | { | ||
527 | struct amp_mgr *mgr; | ||
528 | struct l2cap_chan *chan; | ||
529 | |||
530 | mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); | ||
531 | if (!mgr) | ||
532 | return NULL; | ||
533 | |||
534 | BT_DBG("conn %p mgr %p", conn, mgr); | ||
535 | |||
536 | mgr->l2cap_conn = conn; | ||
537 | |||
538 | chan = a2mp_chan_open(conn); | ||
539 | if (!chan) { | ||
540 | kfree(mgr); | ||
541 | return NULL; | ||
542 | } | ||
543 | |||
544 | mgr->a2mp_chan = chan; | ||
545 | chan->data = mgr; | ||
546 | |||
547 | conn->hcon->amp_mgr = mgr; | ||
548 | |||
549 | kref_init(&mgr->kref); | ||
550 | |||
551 | return mgr; | ||
552 | } | ||
553 | |||
554 | struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, | ||
555 | struct sk_buff *skb) | ||
556 | { | ||
557 | struct amp_mgr *mgr; | ||
558 | |||
559 | mgr = amp_mgr_create(conn); | ||
560 | if (!mgr) { | ||
561 | BT_ERR("Could not create AMP manager"); | ||
562 | return NULL; | ||
563 | } | ||
564 | |||
565 | BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); | ||
566 | |||
567 | return mgr->a2mp_chan; | ||
568 | } | ||
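
The new a2mp.c above frames every A2MP PDU the same way: __a2mp_build() allocates a 4-byte header (code, ident, little-endian length) followed by the payload, and a2mp_send() pushes that buffer through the fixed L2CAP channel with l2cap_chan_send(). Below is a minimal userspace sketch of that framing for a Discover Request; the struct layouts, the 0x02 opcode and the MTU value are assumptions mirroring <net/bluetooth/a2mp.h> and the AMP spec, not copies of the kernel headers.

/* Illustrative userspace sketch (not part of the patch): builds an A2MP
 * Discover Request the way __a2mp_build() above frames commands -- a
 * 4-byte header (code, ident, little-endian length) followed by the
 * payload. Field names only mirror the kernel structs.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct a2mp_cmd_hdr {
	uint8_t  code;
	uint8_t  ident;
	uint16_t len;      /* little-endian on the wire */
	uint8_t  data[];
} __attribute__((packed));

struct a2mp_discov_req {
	uint16_t mtu;      /* little-endian */
	uint16_t ext_feat; /* little-endian */
} __attribute__((packed));

#define A2MP_DISCOVER_REQ 0x02 /* opcode assumed from the Bluetooth AMP spec */

static void *build_discover_req(uint8_t ident, size_t *out_len)
{
	struct a2mp_discov_req req = {
		.mtu      = 0x02a0, /* 672; a typical L2CAP MTU, assumed here */
		.ext_feat = 0x0000, /* no extended features */
	};
	size_t plen = sizeof(struct a2mp_cmd_hdr) + sizeof(req);
	struct a2mp_cmd_hdr *cmd = calloc(1, plen);

	if (!cmd)
		return NULL;

	cmd->code  = A2MP_DISCOVER_REQ;
	cmd->ident = ident;
	cmd->len   = sizeof(req);          /* assumes a little-endian host */
	memcpy(cmd->data, &req, sizeof(req));

	*out_len = plen;
	return cmd;
}

int main(void)
{
	size_t len;
	uint8_t *buf = build_discover_req(0x01, &len);

	for (size_t i = 0; buf && i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	free(buf);
	return 0;
}
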
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 3e18af4dadc4..f7db5792ec64 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -25,18 +25,7 @@ | |||
25 | /* Bluetooth address family and sockets. */ | 25 | /* Bluetooth address family and sockets. */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/list.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/poll.h> | ||
37 | #include <net/sock.h> | ||
38 | #include <asm/ioctls.h> | 28 | #include <asm/ioctls.h> |
39 | #include <linux/kmod.h> | ||
40 | 29 | ||
41 | #include <net/bluetooth/bluetooth.h> | 30 | #include <net/bluetooth/bluetooth.h> |
42 | 31 | ||
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent) | |||
418 | return 0; | 407 | return 0; |
419 | } | 408 | } |
420 | 409 | ||
421 | unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) | 410 | unsigned int bt_sock_poll(struct file *file, struct socket *sock, |
411 | poll_table *wait) | ||
422 | { | 412 | { |
423 | struct sock *sk = sock->sk; | 413 | struct sock *sk = sock->sk; |
424 | unsigned int mask = 0; | 414 | unsigned int mask = 0; |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 031d7d656754..4a6620bc1570 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -26,26 +26,9 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/signal.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/wait.h> | ||
35 | #include <linux/freezer.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/net.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/kthread.h> | 29 | #include <linux/kthread.h> |
40 | #include <net/sock.h> | ||
41 | |||
42 | #include <linux/socket.h> | ||
43 | #include <linux/file.h> | 30 | #include <linux/file.h> |
44 | |||
45 | #include <linux/netdevice.h> | ||
46 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
47 | #include <linux/skbuff.h> | ||
48 | |||
49 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
50 | 33 | ||
51 | #include <net/bluetooth/bluetooth.h> | 34 | #include <net/bluetooth/bluetooth.h> |
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = { | |||
306 | ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ | 289 | ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ |
307 | }; | 290 | }; |
308 | 291 | ||
309 | static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | 292 | static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) |
310 | { | 293 | { |
311 | struct net_device *dev = s->dev; | 294 | struct net_device *dev = s->dev; |
312 | struct sk_buff *nskb; | 295 | struct sk_buff *nskb; |
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = { | |||
404 | BNEP_COMPRESSED | 387 | BNEP_COMPRESSED |
405 | }; | 388 | }; |
406 | 389 | ||
407 | static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) | 390 | static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) |
408 | { | 391 | { |
409 | struct ethhdr *eh = (void *) skb->data; | 392 | struct ethhdr *eh = (void *) skb->data; |
410 | struct socket *sock = s->sock; | 393 | struct socket *sock = s->sock; |
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index bc4086480d97..98f86f91d47c 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c | |||
@@ -25,16 +25,8 @@ | |||
25 | SOFTWARE IS DISCLAIMED. | 25 | SOFTWARE IS DISCLAIMED. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/export.h> |
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include <linux/socket.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/etherdevice.h> | 29 | #include <linux/etherdevice.h> |
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/wait.h> | ||
36 | |||
37 | #include <asm/unaligned.h> | ||
38 | 30 | ||
39 | #include <net/bluetooth/bluetooth.h> | 31 | #include <net/bluetooth/bluetooth.h> |
40 | #include <net/bluetooth/hci_core.h> | 32 | #include <net/bluetooth/hci_core.h> |
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev) | |||
128 | } | 120 | } |
129 | 121 | ||
130 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 122 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
131 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) | 123 | static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) |
132 | { | 124 | { |
133 | struct ethhdr *eh = (void *) skb->data; | 125 | struct ethhdr *eh = (void *) skb->data; |
134 | 126 | ||
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s | |||
140 | 132 | ||
141 | #ifdef CONFIG_BT_BNEP_PROTO_FILTER | 133 | #ifdef CONFIG_BT_BNEP_PROTO_FILTER |
142 | /* Determine ether protocol. Based on eth_type_trans. */ | 134 | /* Determine ether protocol. Based on eth_type_trans. */ |
143 | static inline u16 bnep_net_eth_proto(struct sk_buff *skb) | 135 | static u16 bnep_net_eth_proto(struct sk_buff *skb) |
144 | { | 136 | { |
145 | struct ethhdr *eh = (void *) skb->data; | 137 | struct ethhdr *eh = (void *) skb->data; |
146 | u16 proto = ntohs(eh->h_proto); | 138 | u16 proto = ntohs(eh->h_proto); |
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb) | |||
154 | return ETH_P_802_2; | 146 | return ETH_P_802_2; |
155 | } | 147 | } |
156 | 148 | ||
157 | static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) | 149 | static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) |
158 | { | 150 | { |
159 | u16 proto = bnep_net_eth_proto(skb); | 151 | u16 proto = bnep_net_eth_proto(skb); |
160 | struct bnep_proto_filter *f = s->proto_filter; | 152 | struct bnep_proto_filter *f = s->proto_filter; |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 180bfc45810d..5e5f5b410e0b 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -24,24 +24,8 @@ | |||
24 | SOFTWARE IS DISCLAIMED. | 24 | SOFTWARE IS DISCLAIMED. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/export.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/capability.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fcntl.h> | ||
35 | #include <linux/skbuff.h> | ||
36 | #include <linux/socket.h> | ||
37 | #include <linux/ioctl.h> | ||
38 | #include <linux/file.h> | 28 | #include <linux/file.h> |
39 | #include <linux/init.h> | ||
40 | #include <linux/compat.h> | ||
41 | #include <linux/gfp.h> | ||
42 | #include <linux/uaccess.h> | ||
43 | #include <net/sock.h> | ||
44 | |||
45 | 29 | ||
46 | #include "bnep.h" | 30 | #include "bnep.h" |
47 | 31 | ||
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 3f18a6ed9731..2fcced377e50 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -24,24 +24,11 @@ | |||
24 | 24 | ||
25 | /* Bluetooth HCI connection handling. */ | 25 | /* Bluetooth HCI connection handling. */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/export.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fcntl.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <net/sock.h> | ||
39 | |||
40 | #include <linux/uaccess.h> | ||
41 | #include <asm/unaligned.h> | ||
42 | 28 | ||
43 | #include <net/bluetooth/bluetooth.h> | 29 | #include <net/bluetooth/bluetooth.h> |
44 | #include <net/bluetooth/hci_core.h> | 30 | #include <net/bluetooth/hci_core.h> |
31 | #include <net/bluetooth/a2mp.h> | ||
45 | 32 | ||
46 | static void hci_le_connect(struct hci_conn *conn) | 33 | static void hci_le_connect(struct hci_conn *conn) |
47 | { | 34 | { |
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn) | |||
54 | conn->sec_level = BT_SECURITY_LOW; | 41 | conn->sec_level = BT_SECURITY_LOW; |
55 | 42 | ||
56 | memset(&cp, 0, sizeof(cp)); | 43 | memset(&cp, 0, sizeof(cp)); |
57 | cp.scan_interval = cpu_to_le16(0x0060); | 44 | cp.scan_interval = __constant_cpu_to_le16(0x0060); |
58 | cp.scan_window = cpu_to_le16(0x0030); | 45 | cp.scan_window = __constant_cpu_to_le16(0x0030); |
59 | bacpy(&cp.peer_addr, &conn->dst); | 46 | bacpy(&cp.peer_addr, &conn->dst); |
60 | cp.peer_addr_type = conn->dst_type; | 47 | cp.peer_addr_type = conn->dst_type; |
61 | cp.conn_interval_min = cpu_to_le16(0x0028); | 48 | cp.conn_interval_min = __constant_cpu_to_le16(0x0028); |
62 | cp.conn_interval_max = cpu_to_le16(0x0038); | 49 | cp.conn_interval_max = __constant_cpu_to_le16(0x0038); |
63 | cp.supervision_timeout = cpu_to_le16(0x002a); | 50 | cp.supervision_timeout = __constant_cpu_to_le16(0x002a); |
64 | cp.min_ce_len = cpu_to_le16(0x0000); | 51 | cp.min_ce_len = __constant_cpu_to_le16(0x0000); |
65 | cp.max_ce_len = cpu_to_le16(0x0000); | 52 | cp.max_ce_len = __constant_cpu_to_le16(0x0000); |
66 | 53 | ||
67 | hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); | 54 | hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); |
68 | } | 55 | } |
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn) | |||
99 | cp.pscan_rep_mode = ie->data.pscan_rep_mode; | 86 | cp.pscan_rep_mode = ie->data.pscan_rep_mode; |
100 | cp.pscan_mode = ie->data.pscan_mode; | 87 | cp.pscan_mode = ie->data.pscan_mode; |
101 | cp.clock_offset = ie->data.clock_offset | | 88 | cp.clock_offset = ie->data.clock_offset | |
102 | cpu_to_le16(0x8000); | 89 | __constant_cpu_to_le16(0x8000); |
103 | } | 90 | } |
104 | 91 | ||
105 | memcpy(conn->dev_class, ie->data.dev_class, 3); | 92 | memcpy(conn->dev_class, ie->data.dev_class, 3); |
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) | |||
175 | cp.handle = cpu_to_le16(handle); | 162 | cp.handle = cpu_to_le16(handle); |
176 | cp.pkt_type = cpu_to_le16(conn->pkt_type); | 163 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
177 | 164 | ||
178 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); | 165 | cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); |
179 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | 166 | cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); |
180 | cp.max_latency = cpu_to_le16(0xffff); | 167 | cp.max_latency = __constant_cpu_to_le16(0xffff); |
181 | cp.voice_setting = cpu_to_le16(hdev->voice_setting); | 168 | cp.voice_setting = cpu_to_le16(hdev->voice_setting); |
182 | cp.retrans_effort = 0xff; | 169 | cp.retrans_effort = 0xff; |
183 | 170 | ||
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) | |||
185 | } | 172 | } |
186 | 173 | ||
187 | void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, | 174 | void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, |
188 | u16 latency, u16 to_multiplier) | 175 | u16 latency, u16 to_multiplier) |
189 | { | 176 | { |
190 | struct hci_cp_le_conn_update cp; | 177 | struct hci_cp_le_conn_update cp; |
191 | struct hci_dev *hdev = conn->hdev; | 178 | struct hci_dev *hdev = conn->hdev; |
@@ -197,15 +184,14 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, | |||
197 | cp.conn_interval_max = cpu_to_le16(max); | 184 | cp.conn_interval_max = cpu_to_le16(max); |
198 | cp.conn_latency = cpu_to_le16(latency); | 185 | cp.conn_latency = cpu_to_le16(latency); |
199 | cp.supervision_timeout = cpu_to_le16(to_multiplier); | 186 | cp.supervision_timeout = cpu_to_le16(to_multiplier); |
200 | cp.min_ce_len = cpu_to_le16(0x0001); | 187 | cp.min_ce_len = __constant_cpu_to_le16(0x0001); |
201 | cp.max_ce_len = cpu_to_le16(0x0001); | 188 | cp.max_ce_len = __constant_cpu_to_le16(0x0001); |
202 | 189 | ||
203 | hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); | 190 | hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); |
204 | } | 191 | } |
205 | EXPORT_SYMBOL(hci_le_conn_update); | ||
206 | 192 | ||
207 | void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], | 193 | void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], |
208 | __u8 ltk[16]) | 194 | __u8 ltk[16]) |
209 | { | 195 | { |
210 | struct hci_dev *hdev = conn->hdev; | 196 | struct hci_dev *hdev = conn->hdev; |
211 | struct hci_cp_le_start_enc cp; | 197 | struct hci_cp_le_start_enc cp; |
@@ -221,7 +207,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], | |||
221 | 207 | ||
222 | hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); | 208 | hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); |
223 | } | 209 | } |
224 | EXPORT_SYMBOL(hci_le_start_enc); | ||
225 | 210 | ||
226 | /* Device _must_ be locked */ | 211 | /* Device _must_ be locked */ |
227 | void hci_sco_setup(struct hci_conn *conn, __u8 status) | 212 | void hci_sco_setup(struct hci_conn *conn, __u8 status) |
@@ -247,7 +232,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) | |||
247 | static void hci_conn_timeout(struct work_struct *work) | 232 | static void hci_conn_timeout(struct work_struct *work) |
248 | { | 233 | { |
249 | struct hci_conn *conn = container_of(work, struct hci_conn, | 234 | struct hci_conn *conn = container_of(work, struct hci_conn, |
250 | disc_work.work); | 235 | disc_work.work); |
251 | __u8 reason; | 236 | __u8 reason; |
252 | 237 | ||
253 | BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); | 238 | BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); |
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn) | |||
295 | if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { | 280 | if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { |
296 | struct hci_cp_sniff_subrate cp; | 281 | struct hci_cp_sniff_subrate cp; |
297 | cp.handle = cpu_to_le16(conn->handle); | 282 | cp.handle = cpu_to_le16(conn->handle); |
298 | cp.max_latency = cpu_to_le16(0); | 283 | cp.max_latency = __constant_cpu_to_le16(0); |
299 | cp.min_remote_timeout = cpu_to_le16(0); | 284 | cp.min_remote_timeout = __constant_cpu_to_le16(0); |
300 | cp.min_local_timeout = cpu_to_le16(0); | 285 | cp.min_local_timeout = __constant_cpu_to_le16(0); |
301 | hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); | 286 | hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); |
302 | } | 287 | } |
303 | 288 | ||
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn) | |||
306 | cp.handle = cpu_to_le16(conn->handle); | 291 | cp.handle = cpu_to_le16(conn->handle); |
307 | cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); | 292 | cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); |
308 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); | 293 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); |
309 | cp.attempt = cpu_to_le16(4); | 294 | cp.attempt = __constant_cpu_to_le16(4); |
310 | cp.timeout = cpu_to_le16(1); | 295 | cp.timeout = __constant_cpu_to_le16(1); |
311 | hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); | 296 | hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); |
312 | } | 297 | } |
313 | } | 298 | } |
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg) | |||
327 | struct hci_dev *hdev = conn->hdev; | 312 | struct hci_dev *hdev = conn->hdev; |
328 | 313 | ||
329 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), | 314 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), |
330 | &conn->dst); | 315 | &conn->dst); |
331 | } | 316 | } |
332 | 317 | ||
333 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | 318 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) |
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
376 | INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); | 361 | INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); |
377 | setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); | 362 | setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); |
378 | setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, | 363 | setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, |
379 | (unsigned long) conn); | 364 | (unsigned long) conn); |
380 | 365 | ||
381 | atomic_set(&conn->refcnt, 0); | 366 | atomic_set(&conn->refcnt, 0); |
382 | 367 | ||
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn) | |||
425 | } | 410 | } |
426 | } | 411 | } |
427 | 412 | ||
428 | |||
429 | hci_chan_list_flush(conn); | 413 | hci_chan_list_flush(conn); |
430 | 414 | ||
415 | if (conn->amp_mgr) | ||
416 | amp_mgr_put(conn->amp_mgr); | ||
417 | |||
431 | hci_conn_hash_del(hdev, conn); | 418 | hci_conn_hash_del(hdev, conn); |
432 | if (hdev->notify) | 419 | if (hdev->notify) |
433 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); | 420 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); |
@@ -454,7 +441,8 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | |||
454 | read_lock(&hci_dev_list_lock); | 441 | read_lock(&hci_dev_list_lock); |
455 | 442 | ||
456 | list_for_each_entry(d, &hci_dev_list, list) { | 443 | list_for_each_entry(d, &hci_dev_list, list) { |
457 | if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) | 444 | if (!test_bit(HCI_UP, &d->flags) || |
445 | test_bit(HCI_RAW, &d->flags)) | ||
458 | continue; | 446 | continue; |
459 | 447 | ||
460 | /* Simple routing: | 448 | /* Simple routing: |
@@ -495,6 +483,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, | |||
495 | if (type == LE_LINK) { | 483 | if (type == LE_LINK) { |
496 | le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); | 484 | le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); |
497 | if (!le) { | 485 | if (!le) { |
486 | le = hci_conn_hash_lookup_state(hdev, LE_LINK, | ||
487 | BT_CONNECT); | ||
488 | if (le) | ||
489 | return ERR_PTR(-EBUSY); | ||
490 | |||
498 | le = hci_conn_add(hdev, LE_LINK, dst); | 491 | le = hci_conn_add(hdev, LE_LINK, dst); |
499 | if (!le) | 492 | if (!le) |
500 | return ERR_PTR(-ENOMEM); | 493 | return ERR_PTR(-ENOMEM); |
@@ -545,7 +538,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, | |||
545 | hci_conn_hold(sco); | 538 | hci_conn_hold(sco); |
546 | 539 | ||
547 | if (acl->state == BT_CONNECTED && | 540 | if (acl->state == BT_CONNECTED && |
548 | (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { | 541 | (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { |
549 | set_bit(HCI_CONN_POWER_SAVE, &acl->flags); | 542 | set_bit(HCI_CONN_POWER_SAVE, &acl->flags); |
550 | hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); | 543 | hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); |
551 | 544 | ||
@@ -560,7 +553,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, | |||
560 | 553 | ||
561 | return sco; | 554 | return sco; |
562 | } | 555 | } |
563 | EXPORT_SYMBOL(hci_connect); | ||
564 | 556 | ||
565 | /* Check link security requirement */ | 557 | /* Check link security requirement */ |
566 | int hci_conn_check_link_mode(struct hci_conn *conn) | 558 | int hci_conn_check_link_mode(struct hci_conn *conn) |
@@ -572,7 +564,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn) | |||
572 | 564 | ||
573 | return 1; | 565 | return 1; |
574 | } | 566 | } |
575 | EXPORT_SYMBOL(hci_conn_check_link_mode); | ||
576 | 567 | ||
577 | /* Authenticate remote device */ | 568 | /* Authenticate remote device */ |
578 | static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | 569 | static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) |
@@ -600,7 +591,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | |||
600 | 591 | ||
601 | cp.handle = cpu_to_le16(conn->handle); | 592 | cp.handle = cpu_to_le16(conn->handle); |
602 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, | 593 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, |
603 | sizeof(cp), &cp); | 594 | sizeof(cp), &cp); |
604 | if (conn->key_type != 0xff) | 595 | if (conn->key_type != 0xff) |
605 | set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); | 596 | set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); |
606 | } | 597 | } |
@@ -618,7 +609,7 @@ static void hci_conn_encrypt(struct hci_conn *conn) | |||
618 | cp.handle = cpu_to_le16(conn->handle); | 609 | cp.handle = cpu_to_le16(conn->handle); |
619 | cp.encrypt = 0x01; | 610 | cp.encrypt = 0x01; |
620 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), | 611 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), |
621 | &cp); | 612 | &cp); |
622 | } | 613 | } |
623 | } | 614 | } |
624 | 615 | ||
@@ -648,8 +639,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | |||
648 | /* An unauthenticated combination key has sufficient security for | 639 | /* An unauthenticated combination key has sufficient security for |
649 | security level 1 and 2. */ | 640 | security level 1 and 2. */ |
650 | if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && | 641 | if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && |
651 | (sec_level == BT_SECURITY_MEDIUM || | 642 | (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) |
652 | sec_level == BT_SECURITY_LOW)) | ||
653 | goto encrypt; | 643 | goto encrypt; |
654 | 644 | ||
655 | /* A combination key has always sufficient security for the security | 645 | /* A combination key has always sufficient security for the security |
@@ -657,8 +647,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | |||
657 | is generated using maximum PIN code length (16). | 647 | is generated using maximum PIN code length (16). |
658 | For pre 2.1 units. */ | 648 | For pre 2.1 units. */ |
659 | if (conn->key_type == HCI_LK_COMBINATION && | 649 | if (conn->key_type == HCI_LK_COMBINATION && |
660 | (sec_level != BT_SECURITY_HIGH || | 650 | (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16)) |
661 | conn->pin_length == 16)) | ||
662 | goto encrypt; | 651 | goto encrypt; |
663 | 652 | ||
664 | auth: | 653 | auth: |
@@ -701,12 +690,11 @@ int hci_conn_change_link_key(struct hci_conn *conn) | |||
701 | struct hci_cp_change_conn_link_key cp; | 690 | struct hci_cp_change_conn_link_key cp; |
702 | cp.handle = cpu_to_le16(conn->handle); | 691 | cp.handle = cpu_to_le16(conn->handle); |
703 | hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, | 692 | hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, |
704 | sizeof(cp), &cp); | 693 | sizeof(cp), &cp); |
705 | } | 694 | } |
706 | 695 | ||
707 | return 0; | 696 | return 0; |
708 | } | 697 | } |
709 | EXPORT_SYMBOL(hci_conn_change_link_key); | ||
710 | 698 | ||
711 | /* Switch role */ | 699 | /* Switch role */ |
712 | int hci_conn_switch_role(struct hci_conn *conn, __u8 role) | 700 | int hci_conn_switch_role(struct hci_conn *conn, __u8 role) |
@@ -752,7 +740,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) | |||
752 | timer: | 740 | timer: |
753 | if (hdev->idle_timeout > 0) | 741 | if (hdev->idle_timeout > 0) |
754 | mod_timer(&conn->idle_timer, | 742 | mod_timer(&conn->idle_timer, |
755 | jiffies + msecs_to_jiffies(hdev->idle_timeout)); | 743 | jiffies + msecs_to_jiffies(hdev->idle_timeout)); |
756 | } | 744 | } |
757 | 745 | ||
758 | /* Drop all connection on the device */ | 746 | /* Drop all connection on the device */ |
@@ -802,7 +790,7 @@ EXPORT_SYMBOL(hci_conn_put_device); | |||
802 | 790 | ||
803 | int hci_get_conn_list(void __user *arg) | 791 | int hci_get_conn_list(void __user *arg) |
804 | { | 792 | { |
805 | register struct hci_conn *c; | 793 | struct hci_conn *c; |
806 | struct hci_conn_list_req req, *cl; | 794 | struct hci_conn_list_req req, *cl; |
807 | struct hci_conn_info *ci; | 795 | struct hci_conn_info *ci; |
808 | struct hci_dev *hdev; | 796 | struct hci_dev *hdev; |
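
Much of the hci_conn.c churn above swaps cpu_to_le16()/cpu_to_le32() for the __constant_ variants wherever the argument is a literal, so the byte swap is resolved at compile time rather than going through the runtime helper. A rough userspace sketch of the semantics follows; it assumes the GCC/Clang byte-order macros and is not the kernel implementation.

/* Rough illustration of what __constant_cpu_to_le16() does for the
 * literals above: a no-op on little-endian hosts, a compile-time byte
 * swap on big-endian ones. The real macros live in the kernel's
 * byteorder headers; this only sketches the semantics.
 */
#include <stdint.h>
#include <stdio.h>

#define CONST_CPU_TO_LE16(x) \
	((uint16_t)(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ? (x) : \
		    (((x) & 0x00ff) << 8 | ((x) & 0xff00) >> 8)))

int main(void)
{
	/* 0x0060 is the scan_interval constant used by hci_le_connect() above */
	uint16_t wire = CONST_CPU_TO_LE16(0x0060);

	printf("host 0x0060 -> wire 0x%04x\n", wire);
	return 0;
}
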
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 411ace8e647b..08994ecc3b6a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -25,28 +25,10 @@ | |||
25 | 25 | ||
26 | /* Bluetooth HCI core. */ | 26 | /* Bluetooth HCI core. */ |
27 | 27 | ||
28 | #include <linux/jiffies.h> | 28 | #include <linux/export.h> |
29 | #include <linux/module.h> | 29 | #include <linux/idr.h> |
30 | #include <linux/kmod.h> | ||
31 | |||
32 | #include <linux/types.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/poll.h> | ||
38 | #include <linux/fcntl.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/rfkill.h> | ||
44 | #include <linux/timer.h> | ||
45 | #include <linux/crypto.h> | ||
46 | #include <net/sock.h> | ||
47 | 30 | ||
48 | #include <linux/uaccess.h> | 31 | #include <linux/rfkill.h> |
49 | #include <asm/unaligned.h> | ||
50 | 32 | ||
51 | #include <net/bluetooth/bluetooth.h> | 33 | #include <net/bluetooth/bluetooth.h> |
52 | #include <net/bluetooth/hci_core.h> | 34 | #include <net/bluetooth/hci_core.h> |
@@ -65,6 +47,9 @@ DEFINE_RWLOCK(hci_dev_list_lock); | |||
65 | LIST_HEAD(hci_cb_list); | 47 | LIST_HEAD(hci_cb_list); |
66 | DEFINE_RWLOCK(hci_cb_list_lock); | 48 | DEFINE_RWLOCK(hci_cb_list_lock); |
67 | 49 | ||
50 | /* HCI ID Numbering */ | ||
51 | static DEFINE_IDA(hci_index_ida); | ||
52 | |||
68 | /* ---- HCI notifications ---- */ | 53 | /* ---- HCI notifications ---- */ |
69 | 54 | ||
70 | static void hci_notify(struct hci_dev *hdev, int event) | 55 | static void hci_notify(struct hci_dev *hdev, int event) |
@@ -124,8 +109,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err) | |||
124 | } | 109 | } |
125 | 110 | ||
126 | /* Execute request and wait for completion. */ | 111 | /* Execute request and wait for completion. */ |
127 | static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), | 112 | static int __hci_request(struct hci_dev *hdev, |
128 | unsigned long opt, __u32 timeout) | 113 | void (*req)(struct hci_dev *hdev, unsigned long opt), |
114 | unsigned long opt, __u32 timeout) | ||
129 | { | 115 | { |
130 | DECLARE_WAITQUEUE(wait, current); | 116 | DECLARE_WAITQUEUE(wait, current); |
131 | int err = 0; | 117 | int err = 0; |
@@ -166,8 +152,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, | |||
166 | return err; | 152 | return err; |
167 | } | 153 | } |
168 | 154 | ||
169 | static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), | 155 | static int hci_request(struct hci_dev *hdev, |
170 | unsigned long opt, __u32 timeout) | 156 | void (*req)(struct hci_dev *hdev, unsigned long opt), |
157 | unsigned long opt, __u32 timeout) | ||
171 | { | 158 | { |
172 | int ret; | 159 | int ret; |
173 | 160 | ||
@@ -202,7 +189,7 @@ static void bredr_init(struct hci_dev *hdev) | |||
202 | /* Mandatory initialization */ | 189 | /* Mandatory initialization */ |
203 | 190 | ||
204 | /* Reset */ | 191 | /* Reset */ |
205 | if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { | 192 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
206 | set_bit(HCI_RESET, &hdev->flags); | 193 | set_bit(HCI_RESET, &hdev->flags); |
207 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); | 194 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); |
208 | } | 195 | } |
@@ -235,7 +222,7 @@ static void bredr_init(struct hci_dev *hdev) | |||
235 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); | 222 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); |
236 | 223 | ||
237 | /* Connection accept timeout ~20 secs */ | 224 | /* Connection accept timeout ~20 secs */ |
238 | param = cpu_to_le16(0x7d00); | 225 | param = __constant_cpu_to_le16(0x7d00); |
239 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); | 226 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); |
240 | 227 | ||
241 | bacpy(&cp.bdaddr, BDADDR_ANY); | 228 | bacpy(&cp.bdaddr, BDADDR_ANY); |
@@ -417,7 +404,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev) | |||
417 | INIT_LIST_HEAD(&cache->resolve); | 404 | INIT_LIST_HEAD(&cache->resolve); |
418 | } | 405 | } |
419 | 406 | ||
420 | struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) | 407 | struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, |
408 | bdaddr_t *bdaddr) | ||
421 | { | 409 | { |
422 | struct discovery_state *cache = &hdev->discovery; | 410 | struct discovery_state *cache = &hdev->discovery; |
423 | struct inquiry_entry *e; | 411 | struct inquiry_entry *e; |
@@ -478,7 +466,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, | |||
478 | 466 | ||
479 | list_for_each_entry(p, &cache->resolve, list) { | 467 | list_for_each_entry(p, &cache->resolve, list) { |
480 | if (p->name_state != NAME_PENDING && | 468 | if (p->name_state != NAME_PENDING && |
481 | abs(p->data.rssi) >= abs(ie->data.rssi)) | 469 | abs(p->data.rssi) >= abs(ie->data.rssi)) |
482 | break; | 470 | break; |
483 | pos = &p->list; | 471 | pos = &p->list; |
484 | } | 472 | } |
@@ -503,7 +491,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, | |||
503 | *ssp = true; | 491 | *ssp = true; |
504 | 492 | ||
505 | if (ie->name_state == NAME_NEEDED && | 493 | if (ie->name_state == NAME_NEEDED && |
506 | data->rssi != ie->data.rssi) { | 494 | data->rssi != ie->data.rssi) { |
507 | ie->data.rssi = data->rssi; | 495 | ie->data.rssi = data->rssi; |
508 | hci_inquiry_cache_update_resolve(hdev, ie); | 496 | hci_inquiry_cache_update_resolve(hdev, ie); |
509 | } | 497 | } |
@@ -527,7 +515,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, | |||
527 | 515 | ||
528 | update: | 516 | update: |
529 | if (name_known && ie->name_state != NAME_KNOWN && | 517 | if (name_known && ie->name_state != NAME_KNOWN && |
530 | ie->name_state != NAME_PENDING) { | 518 | ie->name_state != NAME_PENDING) { |
531 | ie->name_state = NAME_KNOWN; | 519 | ie->name_state = NAME_KNOWN; |
532 | list_del(&ie->list); | 520 | list_del(&ie->list); |
533 | } | 521 | } |
@@ -605,8 +593,7 @@ int hci_inquiry(void __user *arg) | |||
605 | 593 | ||
606 | hci_dev_lock(hdev); | 594 | hci_dev_lock(hdev); |
607 | if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || | 595 | if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || |
608 | inquiry_cache_empty(hdev) || | 596 | inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { |
609 | ir.flags & IREQ_CACHE_FLUSH) { | ||
610 | inquiry_cache_flush(hdev); | 597 | inquiry_cache_flush(hdev); |
611 | do_inquiry = 1; | 598 | do_inquiry = 1; |
612 | } | 599 | } |
@@ -620,7 +607,9 @@ int hci_inquiry(void __user *arg) | |||
620 | goto done; | 607 | goto done; |
621 | } | 608 | } |
622 | 609 | ||
623 | /* for unlimited number of responses we will use buffer with 255 entries */ | 610 | /* for unlimited number of responses we will use buffer with |
611 | * 255 entries | ||
612 | */ | ||
624 | max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; | 613 | max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; |
625 | 614 | ||
626 | /* cache_dump can't sleep. Therefore we allocate temp buffer and then | 615 | /* cache_dump can't sleep. Therefore we allocate temp buffer and then |
@@ -641,7 +630,7 @@ int hci_inquiry(void __user *arg) | |||
641 | if (!copy_to_user(ptr, &ir, sizeof(ir))) { | 630 | if (!copy_to_user(ptr, &ir, sizeof(ir))) { |
642 | ptr += sizeof(ir); | 631 | ptr += sizeof(ir); |
643 | if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * | 632 | if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * |
644 | ir.num_rsp)) | 633 | ir.num_rsp)) |
645 | err = -EFAULT; | 634 | err = -EFAULT; |
646 | } else | 635 | } else |
647 | err = -EFAULT; | 636 | err = -EFAULT; |
@@ -702,11 +691,11 @@ int hci_dev_open(__u16 dev) | |||
702 | hdev->init_last_cmd = 0; | 691 | hdev->init_last_cmd = 0; |
703 | 692 | ||
704 | ret = __hci_request(hdev, hci_init_req, 0, | 693 | ret = __hci_request(hdev, hci_init_req, 0, |
705 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 694 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
706 | 695 | ||
707 | if (lmp_host_le_capable(hdev)) | 696 | if (lmp_host_le_capable(hdev)) |
708 | ret = __hci_request(hdev, hci_le_init_req, 0, | 697 | ret = __hci_request(hdev, hci_le_init_req, 0, |
709 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 698 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
710 | 699 | ||
711 | clear_bit(HCI_INIT, &hdev->flags); | 700 | clear_bit(HCI_INIT, &hdev->flags); |
712 | } | 701 | } |
@@ -791,10 +780,10 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
791 | skb_queue_purge(&hdev->cmd_q); | 780 | skb_queue_purge(&hdev->cmd_q); |
792 | atomic_set(&hdev->cmd_cnt, 1); | 781 | atomic_set(&hdev->cmd_cnt, 1); |
793 | if (!test_bit(HCI_RAW, &hdev->flags) && | 782 | if (!test_bit(HCI_RAW, &hdev->flags) && |
794 | test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { | 783 | test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
795 | set_bit(HCI_INIT, &hdev->flags); | 784 | set_bit(HCI_INIT, &hdev->flags); |
796 | __hci_request(hdev, hci_reset_req, 0, | 785 | __hci_request(hdev, hci_reset_req, 0, |
797 | msecs_to_jiffies(250)); | 786 | msecs_to_jiffies(250)); |
798 | clear_bit(HCI_INIT, &hdev->flags); | 787 | clear_bit(HCI_INIT, &hdev->flags); |
799 | } | 788 | } |
800 | 789 | ||
@@ -884,7 +873,7 @@ int hci_dev_reset(__u16 dev) | |||
884 | 873 | ||
885 | if (!test_bit(HCI_RAW, &hdev->flags)) | 874 | if (!test_bit(HCI_RAW, &hdev->flags)) |
886 | ret = __hci_request(hdev, hci_reset_req, 0, | 875 | ret = __hci_request(hdev, hci_reset_req, 0, |
887 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 876 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
888 | 877 | ||
889 | done: | 878 | done: |
890 | hci_req_unlock(hdev); | 879 | hci_req_unlock(hdev); |
@@ -924,7 +913,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
924 | switch (cmd) { | 913 | switch (cmd) { |
925 | case HCISETAUTH: | 914 | case HCISETAUTH: |
926 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, | 915 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, |
927 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 916 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
928 | break; | 917 | break; |
929 | 918 | ||
930 | case HCISETENCRYPT: | 919 | case HCISETENCRYPT: |
@@ -936,23 +925,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
936 | if (!test_bit(HCI_AUTH, &hdev->flags)) { | 925 | if (!test_bit(HCI_AUTH, &hdev->flags)) { |
937 | /* Auth must be enabled first */ | 926 | /* Auth must be enabled first */ |
938 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, | 927 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, |
939 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 928 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
940 | if (err) | 929 | if (err) |
941 | break; | 930 | break; |
942 | } | 931 | } |
943 | 932 | ||
944 | err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, | 933 | err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, |
945 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 934 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
946 | break; | 935 | break; |
947 | 936 | ||
948 | case HCISETSCAN: | 937 | case HCISETSCAN: |
949 | err = hci_request(hdev, hci_scan_req, dr.dev_opt, | 938 | err = hci_request(hdev, hci_scan_req, dr.dev_opt, |
950 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 939 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
951 | break; | 940 | break; |
952 | 941 | ||
953 | case HCISETLINKPOL: | 942 | case HCISETLINKPOL: |
954 | err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, | 943 | err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, |
955 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 944 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
956 | break; | 945 | break; |
957 | 946 | ||
958 | case HCISETLINKMODE: | 947 | case HCISETLINKMODE: |
@@ -1103,7 +1092,7 @@ static void hci_power_on(struct work_struct *work) | |||
1103 | 1092 | ||
1104 | if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) | 1093 | if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) |
1105 | schedule_delayed_work(&hdev->power_off, | 1094 | schedule_delayed_work(&hdev->power_off, |
1106 | msecs_to_jiffies(AUTO_OFF_TIMEOUT)); | 1095 | msecs_to_jiffies(AUTO_OFF_TIMEOUT)); |
1107 | 1096 | ||
1108 | if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) | 1097 | if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) |
1109 | mgmt_index_added(hdev); | 1098 | mgmt_index_added(hdev); |
@@ -1112,7 +1101,7 @@ static void hci_power_on(struct work_struct *work) | |||
1112 | static void hci_power_off(struct work_struct *work) | 1101 | static void hci_power_off(struct work_struct *work) |
1113 | { | 1102 | { |
1114 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 1103 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
1115 | power_off.work); | 1104 | power_off.work); |
1116 | 1105 | ||
1117 | BT_DBG("%s", hdev->name); | 1106 | BT_DBG("%s", hdev->name); |
1118 | 1107 | ||
@@ -1193,7 +1182,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
1193 | } | 1182 | } |
1194 | 1183 | ||
1195 | static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, | 1184 | static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, |
1196 | u8 key_type, u8 old_key_type) | 1185 | u8 key_type, u8 old_key_type) |
1197 | { | 1186 | { |
1198 | /* Legacy key */ | 1187 | /* Legacy key */ |
1199 | if (key_type < 0x03) | 1188 | if (key_type < 0x03) |
@@ -1234,7 +1223,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) | |||
1234 | 1223 | ||
1235 | list_for_each_entry(k, &hdev->long_term_keys, list) { | 1224 | list_for_each_entry(k, &hdev->long_term_keys, list) { |
1236 | if (k->ediv != ediv || | 1225 | if (k->ediv != ediv || |
1237 | memcmp(rand, k->rand, sizeof(k->rand))) | 1226 | memcmp(rand, k->rand, sizeof(k->rand))) |
1238 | continue; | 1227 | continue; |
1239 | 1228 | ||
1240 | return k; | 1229 | return k; |
@@ -1242,7 +1231,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) | |||
1242 | 1231 | ||
1243 | return NULL; | 1232 | return NULL; |
1244 | } | 1233 | } |
1245 | EXPORT_SYMBOL(hci_find_ltk); | ||
1246 | 1234 | ||
1247 | struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, | 1235 | struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1248 | u8 addr_type) | 1236 | u8 addr_type) |
@@ -1251,12 +1239,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
1251 | 1239 | ||
1252 | list_for_each_entry(k, &hdev->long_term_keys, list) | 1240 | list_for_each_entry(k, &hdev->long_term_keys, list) |
1253 | if (addr_type == k->bdaddr_type && | 1241 | if (addr_type == k->bdaddr_type && |
1254 | bacmp(bdaddr, &k->bdaddr) == 0) | 1242 | bacmp(bdaddr, &k->bdaddr) == 0) |
1255 | return k; | 1243 | return k; |
1256 | 1244 | ||
1257 | return NULL; | 1245 | return NULL; |
1258 | } | 1246 | } |
1259 | EXPORT_SYMBOL(hci_find_ltk_by_addr); | ||
1260 | 1247 | ||
1261 | int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, | 1248 | int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, |
1262 | bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) | 1249 | bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) |
@@ -1283,15 +1270,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, | |||
1283 | * combination key for legacy pairing even when there's no | 1270 | * combination key for legacy pairing even when there's no |
1284 | * previous key */ | 1271 | * previous key */ |
1285 | if (type == HCI_LK_CHANGED_COMBINATION && | 1272 | if (type == HCI_LK_CHANGED_COMBINATION && |
1286 | (!conn || conn->remote_auth == 0xff) && | 1273 | (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { |
1287 | old_key_type == 0xff) { | ||
1288 | type = HCI_LK_COMBINATION; | 1274 | type = HCI_LK_COMBINATION; |
1289 | if (conn) | 1275 | if (conn) |
1290 | conn->key_type = type; | 1276 | conn->key_type = type; |
1291 | } | 1277 | } |
1292 | 1278 | ||
1293 | bacpy(&key->bdaddr, bdaddr); | 1279 | bacpy(&key->bdaddr, bdaddr); |
1294 | memcpy(key->val, val, 16); | 1280 | memcpy(key->val, val, HCI_LINK_KEY_SIZE); |
1295 | key->pin_len = pin_len; | 1281 | key->pin_len = pin_len; |
1296 | 1282 | ||
1297 | if (type == HCI_LK_CHANGED_COMBINATION) | 1283 | if (type == HCI_LK_CHANGED_COMBINATION) |
@@ -1540,6 +1526,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) | |||
1540 | 1526 | ||
1541 | memset(&cp, 0, sizeof(cp)); | 1527 | memset(&cp, 0, sizeof(cp)); |
1542 | cp.enable = 1; | 1528 | cp.enable = 1; |
1529 | cp.filter_dup = 1; | ||
1543 | 1530 | ||
1544 | hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | 1531 | hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); |
1545 | } | 1532 | } |
@@ -1707,41 +1694,39 @@ EXPORT_SYMBOL(hci_free_dev); | |||
1707 | /* Register HCI device */ | 1694 | /* Register HCI device */ |
1708 | int hci_register_dev(struct hci_dev *hdev) | 1695 | int hci_register_dev(struct hci_dev *hdev) |
1709 | { | 1696 | { |
1710 | struct list_head *head, *p; | ||
1711 | int id, error; | 1697 | int id, error; |
1712 | 1698 | ||
1713 | if (!hdev->open || !hdev->close) | 1699 | if (!hdev->open || !hdev->close) |
1714 | return -EINVAL; | 1700 | return -EINVAL; |
1715 | 1701 | ||
1716 | write_lock(&hci_dev_list_lock); | ||
1717 | |||
1718 | /* Do not allow HCI_AMP devices to register at index 0, | 1702 | /* Do not allow HCI_AMP devices to register at index 0, |
1719 | * so the index can be used as the AMP controller ID. | 1703 | * so the index can be used as the AMP controller ID. |
1720 | */ | 1704 | */ |
1721 | id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; | 1705 | switch (hdev->dev_type) { |
1722 | head = &hci_dev_list; | 1706 | case HCI_BREDR: |
1723 | 1707 | id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); | |
1724 | /* Find first available device id */ | 1708 | break; |
1725 | list_for_each(p, &hci_dev_list) { | 1709 | case HCI_AMP: |
1726 | int nid = list_entry(p, struct hci_dev, list)->id; | 1710 | id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); |
1727 | if (nid > id) | 1711 | break; |
1728 | break; | 1712 | default: |
1729 | if (nid == id) | 1713 | return -EINVAL; |
1730 | id++; | ||
1731 | head = p; | ||
1732 | } | 1714 | } |
1733 | 1715 | ||
1716 | if (id < 0) | ||
1717 | return id; | ||
1718 | |||
1734 | sprintf(hdev->name, "hci%d", id); | 1719 | sprintf(hdev->name, "hci%d", id); |
1735 | hdev->id = id; | 1720 | hdev->id = id; |
1736 | 1721 | ||
1737 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | 1722 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
1738 | 1723 | ||
1739 | list_add(&hdev->list, head); | 1724 | write_lock(&hci_dev_list_lock); |
1740 | 1725 | list_add(&hdev->list, &hci_dev_list); | |
1741 | write_unlock(&hci_dev_list_lock); | 1726 | write_unlock(&hci_dev_list_lock); |
1742 | 1727 | ||
1743 | hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | | 1728 | hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | |
1744 | WQ_MEM_RECLAIM, 1); | 1729 | WQ_MEM_RECLAIM, 1); |
1745 | if (!hdev->workqueue) { | 1730 | if (!hdev->workqueue) { |
1746 | error = -ENOMEM; | 1731 | error = -ENOMEM; |
1747 | goto err; | 1732 | goto err; |
@@ -1752,7 +1737,8 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1752 | goto err_wqueue; | 1737 | goto err_wqueue; |
1753 | 1738 | ||
1754 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, | 1739 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, |
1755 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); | 1740 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, |
1741 | hdev); | ||
1756 | if (hdev->rfkill) { | 1742 | if (hdev->rfkill) { |
1757 | if (rfkill_register(hdev->rfkill) < 0) { | 1743 | if (rfkill_register(hdev->rfkill) < 0) { |
1758 | rfkill_destroy(hdev->rfkill); | 1744 | rfkill_destroy(hdev->rfkill); |
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1772 | err_wqueue: | 1758 | err_wqueue: |
1773 | destroy_workqueue(hdev->workqueue); | 1759 | destroy_workqueue(hdev->workqueue); |
1774 | err: | 1760 | err: |
1761 | ida_simple_remove(&hci_index_ida, hdev->id); | ||
1775 | write_lock(&hci_dev_list_lock); | 1762 | write_lock(&hci_dev_list_lock); |
1776 | list_del(&hdev->list); | 1763 | list_del(&hdev->list); |
1777 | write_unlock(&hci_dev_list_lock); | 1764 | write_unlock(&hci_dev_list_lock); |
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev); | |||
1783 | /* Unregister HCI device */ | 1770 | /* Unregister HCI device */ |
1784 | void hci_unregister_dev(struct hci_dev *hdev) | 1771 | void hci_unregister_dev(struct hci_dev *hdev) |
1785 | { | 1772 | { |
1786 | int i; | 1773 | int i, id; |
1787 | 1774 | ||
1788 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | 1775 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
1789 | 1776 | ||
1790 | set_bit(HCI_UNREGISTER, &hdev->dev_flags); | 1777 | set_bit(HCI_UNREGISTER, &hdev->dev_flags); |
1791 | 1778 | ||
1779 | id = hdev->id; | ||
1780 | |||
1792 | write_lock(&hci_dev_list_lock); | 1781 | write_lock(&hci_dev_list_lock); |
1793 | list_del(&hdev->list); | 1782 | list_del(&hdev->list); |
1794 | write_unlock(&hci_dev_list_lock); | 1783 | write_unlock(&hci_dev_list_lock); |
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev) | |||
1799 | kfree_skb(hdev->reassembly[i]); | 1788 | kfree_skb(hdev->reassembly[i]); |
1800 | 1789 | ||
1801 | if (!test_bit(HCI_INIT, &hdev->flags) && | 1790 | if (!test_bit(HCI_INIT, &hdev->flags) && |
1802 | !test_bit(HCI_SETUP, &hdev->dev_flags)) { | 1791 | !test_bit(HCI_SETUP, &hdev->dev_flags)) { |
1803 | hci_dev_lock(hdev); | 1792 | hci_dev_lock(hdev); |
1804 | mgmt_index_removed(hdev); | 1793 | mgmt_index_removed(hdev); |
1805 | hci_dev_unlock(hdev); | 1794 | hci_dev_unlock(hdev); |
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev) | |||
1829 | hci_dev_unlock(hdev); | 1818 | hci_dev_unlock(hdev); |
1830 | 1819 | ||
1831 | hci_dev_put(hdev); | 1820 | hci_dev_put(hdev); |
1821 | |||
1822 | ida_simple_remove(&hci_index_ida, id); | ||
1832 | } | 1823 | } |
1833 | EXPORT_SYMBOL(hci_unregister_dev); | 1824 | EXPORT_SYMBOL(hci_unregister_dev); |
1834 | 1825 | ||
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb) | |||
1853 | { | 1844 | { |
1854 | struct hci_dev *hdev = (struct hci_dev *) skb->dev; | 1845 | struct hci_dev *hdev = (struct hci_dev *) skb->dev; |
1855 | if (!hdev || (!test_bit(HCI_UP, &hdev->flags) | 1846 | if (!hdev || (!test_bit(HCI_UP, &hdev->flags) |
1856 | && !test_bit(HCI_INIT, &hdev->flags))) { | 1847 | && !test_bit(HCI_INIT, &hdev->flags))) { |
1857 | kfree_skb(skb); | 1848 | kfree_skb(skb); |
1858 | return -ENXIO; | 1849 | return -ENXIO; |
1859 | } | 1850 | } |
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb) | |||
1872 | EXPORT_SYMBOL(hci_recv_frame); | 1863 | EXPORT_SYMBOL(hci_recv_frame); |
1873 | 1864 | ||
1874 | static int hci_reassembly(struct hci_dev *hdev, int type, void *data, | 1865 | static int hci_reassembly(struct hci_dev *hdev, int type, void *data, |
1875 | int count, __u8 index) | 1866 | int count, __u8 index) |
1876 | { | 1867 | { |
1877 | int len = 0; | 1868 | int len = 0; |
1878 | int hlen = 0; | 1869 | int hlen = 0; |
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data, | |||
1881 | struct bt_skb_cb *scb; | 1872 | struct bt_skb_cb *scb; |
1882 | 1873 | ||
1883 | if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || | 1874 | if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || |
1884 | index >= NUM_REASSEMBLY) | 1875 | index >= NUM_REASSEMBLY) |
1885 | return -EILSEQ; | 1876 | return -EILSEQ; |
1886 | 1877 | ||
1887 | skb = hdev->reassembly[index]; | 1878 | skb = hdev->reassembly[index]; |
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) | |||
2023 | type = bt_cb(skb)->pkt_type; | 2014 | type = bt_cb(skb)->pkt_type; |
2024 | 2015 | ||
2025 | rem = hci_reassembly(hdev, type, data, count, | 2016 | rem = hci_reassembly(hdev, type, data, count, |
2026 | STREAM_REASSEMBLY); | 2017 | STREAM_REASSEMBLY); |
2027 | if (rem < 0) | 2018 | if (rem < 0) |
2028 | return rem; | 2019 | return rem; |
2029 | 2020 | ||
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) | |||
2157 | } | 2148 | } |
2158 | 2149 | ||
2159 | static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, | 2150 | static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, |
2160 | struct sk_buff *skb, __u16 flags) | 2151 | struct sk_buff *skb, __u16 flags) |
2161 | { | 2152 | { |
2162 | struct hci_dev *hdev = conn->hdev; | 2153 | struct hci_dev *hdev = conn->hdev; |
2163 | struct sk_buff *list; | 2154 | struct sk_buff *list; |
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) | |||
2216 | 2207 | ||
2217 | queue_work(hdev->workqueue, &hdev->tx_work); | 2208 | queue_work(hdev->workqueue, &hdev->tx_work); |
2218 | } | 2209 | } |
2219 | EXPORT_SYMBOL(hci_send_acl); | ||
2220 | 2210 | ||
2221 | /* Send SCO data */ | 2211 | /* Send SCO data */ |
2222 | void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) | 2212 | void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) |
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) | |||
2239 | skb_queue_tail(&conn->data_q, skb); | 2229 | skb_queue_tail(&conn->data_q, skb); |
2240 | queue_work(hdev->workqueue, &hdev->tx_work); | 2230 | queue_work(hdev->workqueue, &hdev->tx_work); |
2241 | } | 2231 | } |
2242 | EXPORT_SYMBOL(hci_send_sco); | ||
2243 | 2232 | ||
2244 | /* ---- HCI TX task (outgoing data) ---- */ | 2233 | /* ---- HCI TX task (outgoing data) ---- */ |
2245 | 2234 | ||
2246 | /* HCI Connection scheduler */ | 2235 | /* HCI Connection scheduler */ |
2247 | static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) | 2236 | static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, |
2237 | int *quote) | ||
2248 | { | 2238 | { |
2249 | struct hci_conn_hash *h = &hdev->conn_hash; | 2239 | struct hci_conn_hash *h = &hdev->conn_hash; |
2250 | struct hci_conn *conn = NULL, *c; | 2240 | struct hci_conn *conn = NULL, *c; |
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int | |||
2303 | return conn; | 2293 | return conn; |
2304 | } | 2294 | } |
2305 | 2295 | ||
2306 | static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) | 2296 | static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) |
2307 | { | 2297 | { |
2308 | struct hci_conn_hash *h = &hdev->conn_hash; | 2298 | struct hci_conn_hash *h = &hdev->conn_hash; |
2309 | struct hci_conn *c; | 2299 | struct hci_conn *c; |
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) | |||
2316 | list_for_each_entry_rcu(c, &h->list, list) { | 2306 | list_for_each_entry_rcu(c, &h->list, list) { |
2317 | if (c->type == type && c->sent) { | 2307 | if (c->type == type && c->sent) { |
2318 | BT_ERR("%s killing stalled connection %s", | 2308 | BT_ERR("%s killing stalled connection %s", |
2319 | hdev->name, batostr(&c->dst)); | 2309 | hdev->name, batostr(&c->dst)); |
2320 | hci_acl_disconn(c, 0x13); | 2310 | hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); |
2321 | } | 2311 | } |
2322 | } | 2312 | } |
2323 | 2313 | ||
2324 | rcu_read_unlock(); | 2314 | rcu_read_unlock(); |
2325 | } | 2315 | } |
2326 | 2316 | ||
2327 | static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, | 2317 | static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, |
2328 | int *quote) | 2318 | int *quote) |
2329 | { | 2319 | { |
2330 | struct hci_conn_hash *h = &hdev->conn_hash; | 2320 | struct hci_conn_hash *h = &hdev->conn_hash; |
2331 | struct hci_chan *chan = NULL; | 2321 | struct hci_chan *chan = NULL; |
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) | |||
2442 | skb->priority = HCI_PRIO_MAX - 1; | 2432 | skb->priority = HCI_PRIO_MAX - 1; |
2443 | 2433 | ||
2444 | BT_DBG("chan %p skb %p promoted to %d", chan, skb, | 2434 | BT_DBG("chan %p skb %p promoted to %d", chan, skb, |
2445 | skb->priority); | 2435 | skb->priority); |
2446 | } | 2436 | } |
2447 | 2437 | ||
2448 | if (hci_conn_num(hdev, type) == num) | 2438 | if (hci_conn_num(hdev, type) == num) |
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) | |||
2459 | return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); | 2449 | return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); |
2460 | } | 2450 | } |
2461 | 2451 | ||
2462 | static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) | 2452 | static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) |
2463 | { | 2453 | { |
2464 | if (!test_bit(HCI_RAW, &hdev->flags)) { | 2454 | if (!test_bit(HCI_RAW, &hdev->flags)) { |
2465 | /* ACL tx timeout must be longer than maximum | 2455 | /* ACL tx timeout must be longer than maximum |
2466 | * link supervision timeout (40.9 seconds) */ | 2456 | * link supervision timeout (40.9 seconds) */ |
2467 | if (!cnt && time_after(jiffies, hdev->acl_last_tx + | 2457 | if (!cnt && time_after(jiffies, hdev->acl_last_tx + |
2468 | msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) | 2458 | msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) |
2469 | hci_link_tx_to(hdev, ACL_LINK); | 2459 | hci_link_tx_to(hdev, ACL_LINK); |
2470 | } | 2460 | } |
2471 | } | 2461 | } |
2472 | 2462 | ||
2473 | static inline void hci_sched_acl_pkt(struct hci_dev *hdev) | 2463 | static void hci_sched_acl_pkt(struct hci_dev *hdev) |
2474 | { | 2464 | { |
2475 | unsigned int cnt = hdev->acl_cnt; | 2465 | unsigned int cnt = hdev->acl_cnt; |
2476 | struct hci_chan *chan; | 2466 | struct hci_chan *chan; |
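The __check_timeout() change above only drops the inline hint; the logic stays as the comment says: flag a stalled ACL link only when no credits are left and the last TX is older than HCI_ACL_TX_TIMEOUT, compared wrap-safely with time_after(). A small sketch of that jiffies pattern, using a hypothetical helper name (tx_stalled) rather than anything in this file:

#include <linux/types.h>
#include <linux/jiffies.h>

/* Illustrative only: true when nothing can be sent (cnt == 0) and the last
 * transmission is older than timeout_ms; time_after() handles jiffies
 * wrap-around correctly.
 */
static bool tx_stalled(unsigned long last_tx, unsigned int cnt,
		       unsigned int timeout_ms)
{
	return !cnt && time_after(jiffies, last_tx + msecs_to_jiffies(timeout_ms));
}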
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev) | |||
2480 | __check_timeout(hdev, cnt); | 2470 | __check_timeout(hdev, cnt); |
2481 | 2471 | ||
2482 | while (hdev->acl_cnt && | 2472 | while (hdev->acl_cnt && |
2483 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { | 2473 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 | u32 priority = (skb_peek(&chan->data_q))->priority; | 2474 | u32 priority = (skb_peek(&chan->data_q))->priority; |
2485 | while (quote-- && (skb = skb_peek(&chan->data_q))) { | 2475 | while (quote-- && (skb = skb_peek(&chan->data_q))) { |
2486 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, | 2476 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, |
2487 | skb->len, skb->priority); | 2477 | skb->len, skb->priority); |
2488 | 2478 | ||
2489 | /* Stop if priority has changed */ | 2479 | /* Stop if priority has changed */ |
2490 | if (skb->priority < priority) | 2480 | if (skb->priority < priority) |
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev) | |||
2508 | hci_prio_recalculate(hdev, ACL_LINK); | 2498 | hci_prio_recalculate(hdev, ACL_LINK); |
2509 | } | 2499 | } |
2510 | 2500 | ||
2511 | static inline void hci_sched_acl_blk(struct hci_dev *hdev) | 2501 | static void hci_sched_acl_blk(struct hci_dev *hdev) |
2512 | { | 2502 | { |
2513 | unsigned int cnt = hdev->block_cnt; | 2503 | unsigned int cnt = hdev->block_cnt; |
2514 | struct hci_chan *chan; | 2504 | struct hci_chan *chan; |
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev) | |||
2518 | __check_timeout(hdev, cnt); | 2508 | __check_timeout(hdev, cnt); |
2519 | 2509 | ||
2520 | while (hdev->block_cnt > 0 && | 2510 | while (hdev->block_cnt > 0 && |
2521 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { | 2511 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 | u32 priority = (skb_peek(&chan->data_q))->priority; | 2512 | u32 priority = (skb_peek(&chan->data_q))->priority; |
2523 | while (quote > 0 && (skb = skb_peek(&chan->data_q))) { | 2513 | while (quote > 0 && (skb = skb_peek(&chan->data_q))) { |
2524 | int blocks; | 2514 | int blocks; |
2525 | 2515 | ||
2526 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, | 2516 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, |
2527 | skb->len, skb->priority); | 2517 | skb->len, skb->priority); |
2528 | 2518 | ||
2529 | /* Stop if priority has changed */ | 2519 | /* Stop if priority has changed */ |
2530 | if (skb->priority < priority) | 2520 | if (skb->priority < priority) |
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev) | |||
2537 | return; | 2527 | return; |
2538 | 2528 | ||
2539 | hci_conn_enter_active_mode(chan->conn, | 2529 | hci_conn_enter_active_mode(chan->conn, |
2540 | bt_cb(skb)->force_active); | 2530 | bt_cb(skb)->force_active); |
2541 | 2531 | ||
2542 | hci_send_frame(skb); | 2532 | hci_send_frame(skb); |
2543 | hdev->acl_last_tx = jiffies; | 2533 | hdev->acl_last_tx = jiffies; |
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev) | |||
2554 | hci_prio_recalculate(hdev, ACL_LINK); | 2544 | hci_prio_recalculate(hdev, ACL_LINK); |
2555 | } | 2545 | } |
2556 | 2546 | ||
2557 | static inline void hci_sched_acl(struct hci_dev *hdev) | 2547 | static void hci_sched_acl(struct hci_dev *hdev) |
2558 | { | 2548 | { |
2559 | BT_DBG("%s", hdev->name); | 2549 | BT_DBG("%s", hdev->name); |
2560 | 2550 | ||
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev) | |||
2573 | } | 2563 | } |
2574 | 2564 | ||
2575 | /* Schedule SCO */ | 2565 | /* Schedule SCO */ |
2576 | static inline void hci_sched_sco(struct hci_dev *hdev) | 2566 | static void hci_sched_sco(struct hci_dev *hdev) |
2577 | { | 2567 | { |
2578 | struct hci_conn *conn; | 2568 | struct hci_conn *conn; |
2579 | struct sk_buff *skb; | 2569 | struct sk_buff *skb; |
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev) | |||
2596 | } | 2586 | } |
2597 | } | 2587 | } |
2598 | 2588 | ||
2599 | static inline void hci_sched_esco(struct hci_dev *hdev) | 2589 | static void hci_sched_esco(struct hci_dev *hdev) |
2600 | { | 2590 | { |
2601 | struct hci_conn *conn; | 2591 | struct hci_conn *conn; |
2602 | struct sk_buff *skb; | 2592 | struct sk_buff *skb; |
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev) | |||
2607 | if (!hci_conn_num(hdev, ESCO_LINK)) | 2597 | if (!hci_conn_num(hdev, ESCO_LINK)) |
2608 | return; | 2598 | return; |
2609 | 2599 | ||
2610 | while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { | 2600 | while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 | &quote))) { ||
2611 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { | 2602 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { |
2612 | BT_DBG("skb %p len %d", skb, skb->len); | 2603 | BT_DBG("skb %p len %d", skb, skb->len); |
2613 | hci_send_frame(skb); | 2604 | hci_send_frame(skb); |
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev) | |||
2619 | } | 2610 | } |
2620 | } | 2611 | } |
2621 | 2612 | ||
2622 | static inline void hci_sched_le(struct hci_dev *hdev) | 2613 | static void hci_sched_le(struct hci_dev *hdev) |
2623 | { | 2614 | { |
2624 | struct hci_chan *chan; | 2615 | struct hci_chan *chan; |
2625 | struct sk_buff *skb; | 2616 | struct sk_buff *skb; |
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev) | |||
2634 | /* LE tx timeout must be longer than maximum | 2625 | /* LE tx timeout must be longer than maximum |
2635 | * link supervision timeout (40.9 seconds) */ | 2626 | * link supervision timeout (40.9 seconds) */ |
2636 | if (!hdev->le_cnt && hdev->le_pkts && | 2627 | if (!hdev->le_cnt && hdev->le_pkts && |
2637 | time_after(jiffies, hdev->le_last_tx + HZ * 45)) | 2628 | time_after(jiffies, hdev->le_last_tx + HZ * 45)) |
2638 | hci_link_tx_to(hdev, LE_LINK); | 2629 | hci_link_tx_to(hdev, LE_LINK); |
2639 | } | 2630 | } |
2640 | 2631 | ||
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev) | |||
2644 | u32 priority = (skb_peek(&chan->data_q))->priority; | 2635 | u32 priority = (skb_peek(&chan->data_q))->priority; |
2645 | while (quote-- && (skb = skb_peek(&chan->data_q))) { | 2636 | while (quote-- && (skb = skb_peek(&chan->data_q))) { |
2646 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, | 2637 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, |
2647 | skb->len, skb->priority); | 2638 | skb->len, skb->priority); |
2648 | 2639 | ||
2649 | /* Stop if priority has changed */ | 2640 | /* Stop if priority has changed */ |
2650 | if (skb->priority < priority) | 2641 | if (skb->priority < priority) |
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work) | |||
2676 | struct sk_buff *skb; | 2667 | struct sk_buff *skb; |
2677 | 2668 | ||
2678 | BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, | 2669 | BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, |
2679 | hdev->sco_cnt, hdev->le_cnt); | 2670 | hdev->sco_cnt, hdev->le_cnt); |
2680 | 2671 | ||
2681 | /* Schedule queues and send stuff to HCI driver */ | 2672 | /* Schedule queues and send stuff to HCI driver */ |
2682 | 2673 | ||
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work) | |||
2696 | /* ----- HCI RX task (incoming data processing) ----- */ | 2687 | /* ----- HCI RX task (incoming data processing) ----- */ |
2697 | 2688 | ||
2698 | /* ACL data packet */ | 2689 | /* ACL data packet */ |
2699 | static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) | 2690 | static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) |
2700 | { | 2691 | { |
2701 | struct hci_acl_hdr *hdr = (void *) skb->data; | 2692 | struct hci_acl_hdr *hdr = (void *) skb->data; |
2702 | struct hci_conn *conn; | 2693 | struct hci_conn *conn; |
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2708 | flags = hci_flags(handle); | 2699 | flags = hci_flags(handle); |
2709 | handle = hci_handle(handle); | 2700 | handle = hci_handle(handle); |
2710 | 2701 | ||
2711 | BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); | 2702 | BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, |
2703 | handle, flags); | ||
2712 | 2704 | ||
2713 | hdev->stat.acl_rx++; | 2705 | hdev->stat.acl_rx++; |
2714 | 2706 | ||
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2732 | return; | 2724 | return; |
2733 | } else { | 2725 | } else { |
2734 | BT_ERR("%s ACL packet for unknown connection handle %d", | 2726 | BT_ERR("%s ACL packet for unknown connection handle %d", |
2735 | hdev->name, handle); | 2727 | hdev->name, handle); |
2736 | } | 2728 | } |
2737 | 2729 | ||
2738 | kfree_skb(skb); | 2730 | kfree_skb(skb); |
2739 | } | 2731 | } |
2740 | 2732 | ||
2741 | /* SCO data packet */ | 2733 | /* SCO data packet */ |
2742 | static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) | 2734 | static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) |
2743 | { | 2735 | { |
2744 | struct hci_sco_hdr *hdr = (void *) skb->data; | 2736 | struct hci_sco_hdr *hdr = (void *) skb->data; |
2745 | struct hci_conn *conn; | 2737 | struct hci_conn *conn; |
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2763 | return; | 2755 | return; |
2764 | } else { | 2756 | } else { |
2765 | BT_ERR("%s SCO packet for unknown connection handle %d", | 2757 | BT_ERR("%s SCO packet for unknown connection handle %d", |
2766 | hdev->name, handle); | 2758 | hdev->name, handle); |
2767 | } | 2759 | } |
2768 | 2760 | ||
2769 | kfree_skb(skb); | 2761 | kfree_skb(skb); |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4eefb7f65cf6..1ba929c05d0d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -24,20 +24,7 @@ | |||
24 | 24 | ||
25 | /* Bluetooth HCI event handling. */ | 25 | /* Bluetooth HCI event handling. */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/export.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fcntl.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <net/sock.h> | ||
39 | |||
40 | #include <linux/uaccess.h> | ||
41 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
42 | 29 | ||
43 | #include <net/bluetooth/bluetooth.h> | 30 | #include <net/bluetooth/bluetooth.h> |
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) | |||
95 | hci_conn_check_pending(hdev); | 82 | hci_conn_check_pending(hdev); |
96 | } | 83 | } |
97 | 84 | ||
98 | static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) | 85 | static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, |
86 | struct sk_buff *skb) | ||
99 | { | 87 | { |
100 | BT_DBG("%s", hdev->name); | 88 | BT_DBG("%s", hdev->name); |
101 | } | 89 | } |
@@ -166,7 +154,8 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | |||
166 | hci_dev_unlock(hdev); | 154 | hci_dev_unlock(hdev); |
167 | } | 155 | } |
168 | 156 | ||
169 | static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | 157 | static void hci_cc_read_def_link_policy(struct hci_dev *hdev, |
158 | struct sk_buff *skb) | ||
170 | { | 159 | { |
171 | struct hci_rp_read_def_link_policy *rp = (void *) skb->data; | 160 | struct hci_rp_read_def_link_policy *rp = (void *) skb->data; |
172 | 161 | ||
@@ -178,7 +167,8 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk | |||
178 | hdev->link_policy = __le16_to_cpu(rp->policy); | 167 | hdev->link_policy = __le16_to_cpu(rp->policy); |
179 | } | 168 | } |
180 | 169 | ||
181 | static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) | 170 | static void hci_cc_write_def_link_policy(struct hci_dev *hdev, |
171 | struct sk_buff *skb) | ||
182 | { | 172 | { |
183 | __u8 status = *((__u8 *) skb->data); | 173 | __u8 status = *((__u8 *) skb->data); |
184 | void *sent; | 174 | void *sent; |
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) | |||
329 | if (hdev->discov_timeout > 0) { | 319 | if (hdev->discov_timeout > 0) { |
330 | int to = msecs_to_jiffies(hdev->discov_timeout * 1000); | 320 | int to = msecs_to_jiffies(hdev->discov_timeout * 1000); |
331 | queue_delayed_work(hdev->workqueue, &hdev->discov_off, | 321 | queue_delayed_work(hdev->workqueue, &hdev->discov_off, |
332 | to); | 322 | to); |
333 | } | 323 | } |
334 | } else if (old_iscan) | 324 | } else if (old_iscan) |
335 | mgmt_discoverable(hdev, 0); | 325 | mgmt_discoverable(hdev, 0); |
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) | |||
358 | memcpy(hdev->dev_class, rp->dev_class, 3); | 348 | memcpy(hdev->dev_class, rp->dev_class, 3); |
359 | 349 | ||
360 | BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, | 350 | BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, |
361 | hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); | 351 | hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); |
362 | } | 352 | } |
363 | 353 | ||
364 | static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) | 354 | static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -406,7 +396,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | |||
406 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 396 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); |
407 | } | 397 | } |
408 | 398 | ||
409 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | 399 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, |
400 | struct sk_buff *skb) | ||
410 | { | 401 | { |
411 | __u8 status = *((__u8 *) skb->data); | 402 | __u8 status = *((__u8 *) skb->data); |
412 | __u16 setting; | 403 | __u16 setting; |
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) | |||
473 | return 1; | 464 | return 1; |
474 | 465 | ||
475 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && | 466 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && |
476 | hdev->lmp_subver == 0x0757) | 467 | hdev->lmp_subver == 0x0757) |
477 | return 1; | 468 | return 1; |
478 | 469 | ||
479 | if (hdev->manufacturer == 15) { | 470 | if (hdev->manufacturer == 15) { |
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) | |||
486 | } | 477 | } |
487 | 478 | ||
488 | if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && | 479 | if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && |
489 | hdev->lmp_subver == 0x1805) | 480 | hdev->lmp_subver == 0x1805) |
490 | return 1; | 481 | return 1; |
491 | 482 | ||
492 | return 0; | 483 | return 0; |
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev) | |||
566 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) | 557 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) |
567 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); | 558 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); |
568 | 559 | ||
569 | if (hdev->features[6] & LMP_SIMPLE_PAIR) { | 560 | if (lmp_ssp_capable(hdev)) { |
570 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { | 561 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { |
571 | u8 mode = 0x01; | 562 | u8 mode = 0x01; |
572 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, | 563 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, |
@@ -618,8 +609,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | |||
618 | hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); | 609 | hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); |
619 | 610 | ||
620 | BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, | 611 | BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, |
621 | hdev->manufacturer, | 612 | hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); |
622 | hdev->hci_ver, hdev->hci_rev); | ||
623 | 613 | ||
624 | if (test_bit(HCI_INIT, &hdev->flags)) | 614 | if (test_bit(HCI_INIT, &hdev->flags)) |
625 | hci_setup(hdev); | 615 | hci_setup(hdev); |
@@ -646,7 +636,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev) | |||
646 | hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); | 636 | hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); |
647 | } | 637 | } |
648 | 638 | ||
649 | static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) | 639 | static void hci_cc_read_local_commands(struct hci_dev *hdev, |
640 | struct sk_buff *skb) | ||
650 | { | 641 | { |
651 | struct hci_rp_read_local_commands *rp = (void *) skb->data; | 642 | struct hci_rp_read_local_commands *rp = (void *) skb->data; |
652 | 643 | ||
@@ -664,7 +655,8 @@ done: | |||
664 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); | 655 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); |
665 | } | 656 | } |
666 | 657 | ||
667 | static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) | 658 | static void hci_cc_read_local_features(struct hci_dev *hdev, |
659 | struct sk_buff *skb) | ||
668 | { | 660 | { |
669 | struct hci_rp_read_local_features *rp = (void *) skb->data; | 661 | struct hci_rp_read_local_features *rp = (void *) skb->data; |
670 | 662 | ||
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb | |||
713 | hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); | 705 | hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); |
714 | 706 | ||
715 | BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, | 707 | BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, |
716 | hdev->features[0], hdev->features[1], | 708 | hdev->features[0], hdev->features[1], |
717 | hdev->features[2], hdev->features[3], | 709 | hdev->features[2], hdev->features[3], |
718 | hdev->features[4], hdev->features[5], | 710 | hdev->features[4], hdev->features[5], |
719 | hdev->features[6], hdev->features[7]); | 711 | hdev->features[6], hdev->features[7]); |
720 | } | 712 | } |
721 | 713 | ||
722 | static void hci_set_le_support(struct hci_dev *hdev) | 714 | static void hci_set_le_support(struct hci_dev *hdev) |
@@ -736,7 +728,7 @@ static void hci_set_le_support(struct hci_dev *hdev) | |||
736 | } | 728 | } |
737 | 729 | ||
738 | static void hci_cc_read_local_ext_features(struct hci_dev *hdev, | 730 | static void hci_cc_read_local_ext_features(struct hci_dev *hdev, |
739 | struct sk_buff *skb) | 731 | struct sk_buff *skb) |
740 | { | 732 | { |
741 | struct hci_rp_read_local_ext_features *rp = (void *) skb->data; | 733 | struct hci_rp_read_local_ext_features *rp = (void *) skb->data; |
742 | 734 | ||
@@ -762,7 +754,7 @@ done: | |||
762 | } | 754 | } |
763 | 755 | ||
764 | static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, | 756 | static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, |
765 | struct sk_buff *skb) | 757 | struct sk_buff *skb) |
766 | { | 758 | { |
767 | struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; | 759 | struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; |
768 | 760 | ||
@@ -798,9 +790,8 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | |||
798 | hdev->acl_cnt = hdev->acl_pkts; | 790 | hdev->acl_cnt = hdev->acl_pkts; |
799 | hdev->sco_cnt = hdev->sco_pkts; | 791 | hdev->sco_cnt = hdev->sco_pkts; |
800 | 792 | ||
801 | BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, | 793 | BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, |
802 | hdev->acl_mtu, hdev->acl_pkts, | 794 | hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); |
803 | hdev->sco_mtu, hdev->sco_pkts); | ||
804 | } | 795 | } |
805 | 796 | ||
806 | static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) | 797 | static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -816,7 +807,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) | |||
816 | } | 807 | } |
817 | 808 | ||
818 | static void hci_cc_read_data_block_size(struct hci_dev *hdev, | 809 | static void hci_cc_read_data_block_size(struct hci_dev *hdev, |
819 | struct sk_buff *skb) | 810 | struct sk_buff *skb) |
820 | { | 811 | { |
821 | struct hci_rp_read_data_block_size *rp = (void *) skb->data; | 812 | struct hci_rp_read_data_block_size *rp = (void *) skb->data; |
822 | 813 | ||
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev, | |||
832 | hdev->block_cnt = hdev->num_blocks; | 823 | hdev->block_cnt = hdev->num_blocks; |
833 | 824 | ||
834 | BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, | 825 | BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, |
835 | hdev->block_cnt, hdev->block_len); | 826 | hdev->block_cnt, hdev->block_len); |
836 | 827 | ||
837 | hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); | 828 | hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); |
838 | } | 829 | } |
@@ -847,7 +838,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) | |||
847 | } | 838 | } |
848 | 839 | ||
849 | static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | 840 | static void hci_cc_read_local_amp_info(struct hci_dev *hdev, |
850 | struct sk_buff *skb) | 841 | struct sk_buff *skb) |
851 | { | 842 | { |
852 | struct hci_rp_read_local_amp_info *rp = (void *) skb->data; | 843 | struct hci_rp_read_local_amp_info *rp = (void *) skb->data; |
853 | 844 | ||
@@ -871,7 +862,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | |||
871 | } | 862 | } |
872 | 863 | ||
873 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, | 864 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, |
874 | struct sk_buff *skb) | 865 | struct sk_buff *skb) |
875 | { | 866 | { |
876 | __u8 status = *((__u8 *) skb->data); | 867 | __u8 status = *((__u8 *) skb->data); |
877 | 868 | ||
@@ -890,7 +881,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) | |||
890 | } | 881 | } |
891 | 882 | ||
892 | static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, | 883 | static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, |
893 | struct sk_buff *skb) | 884 | struct sk_buff *skb) |
894 | { | 885 | { |
895 | __u8 status = *((__u8 *) skb->data); | 886 | __u8 status = *((__u8 *) skb->data); |
896 | 887 | ||
@@ -900,7 +891,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, | |||
900 | } | 891 | } |
901 | 892 | ||
902 | static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, | 893 | static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, |
903 | struct sk_buff *skb) | 894 | struct sk_buff *skb) |
904 | { | 895 | { |
905 | struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; | 896 | struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; |
906 | 897 | ||
@@ -959,7 +950,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
959 | 950 | ||
960 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) | 951 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) |
961 | mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, | 952 | mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, |
962 | rp->status); | 953 | rp->status); |
963 | 954 | ||
964 | hci_dev_unlock(hdev); | 955 | hci_dev_unlock(hdev); |
965 | } | 956 | } |
@@ -1000,7 +991,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
1000 | } | 991 | } |
1001 | 992 | ||
1002 | static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, | 993 | static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, |
1003 | struct sk_buff *skb) | 994 | struct sk_buff *skb) |
1004 | { | 995 | { |
1005 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; | 996 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; |
1006 | 997 | ||
@@ -1031,7 +1022,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
1031 | } | 1022 | } |
1032 | 1023 | ||
1033 | static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, | 1024 | static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, |
1034 | struct sk_buff *skb) | 1025 | struct sk_buff *skb) |
1035 | { | 1026 | { |
1036 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; | 1027 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; |
1037 | 1028 | ||
@@ -1047,7 +1038,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, | |||
1047 | } | 1038 | } |
1048 | 1039 | ||
1049 | static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, | 1040 | static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, |
1050 | struct sk_buff *skb) | 1041 | struct sk_buff *skb) |
1051 | { | 1042 | { |
1052 | struct hci_rp_read_local_oob_data *rp = (void *) skb->data; | 1043 | struct hci_rp_read_local_oob_data *rp = (void *) skb->data; |
1053 | 1044 | ||
@@ -1076,7 +1067,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) | |||
1076 | } | 1067 | } |
1077 | 1068 | ||
1078 | static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, | 1069 | static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, |
1079 | struct sk_buff *skb) | 1070 | struct sk_buff *skb) |
1080 | { | 1071 | { |
1081 | struct hci_cp_le_set_scan_enable *cp; | 1072 | struct hci_cp_le_set_scan_enable *cp; |
1082 | __u8 status = *((__u8 *) skb->data); | 1073 | __u8 status = *((__u8 *) skb->data); |
@@ -1156,8 +1147,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
1156 | hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); | 1147 | hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); |
1157 | } | 1148 | } |
1158 | 1149 | ||
1159 | static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, | 1150 | static void hci_cc_write_le_host_supported(struct hci_dev *hdev, |
1160 | struct sk_buff *skb) | 1151 | struct sk_buff *skb) |
1161 | { | 1152 | { |
1162 | struct hci_cp_write_le_host_supported *sent; | 1153 | struct hci_cp_write_le_host_supported *sent; |
1163 | __u8 status = *((__u8 *) skb->data); | 1154 | __u8 status = *((__u8 *) skb->data); |
@@ -1176,13 +1167,13 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, | |||
1176 | } | 1167 | } |
1177 | 1168 | ||
1178 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && | 1169 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && |
1179 | !test_bit(HCI_INIT, &hdev->flags)) | 1170 | !test_bit(HCI_INIT, &hdev->flags)) |
1180 | mgmt_le_enable_complete(hdev, sent->le, status); | 1171 | mgmt_le_enable_complete(hdev, sent->le, status); |
1181 | 1172 | ||
1182 | hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); | 1173 | hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); |
1183 | } | 1174 | } |
1184 | 1175 | ||
1185 | static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | 1176 | static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) |
1186 | { | 1177 | { |
1187 | BT_DBG("%s status 0x%x", hdev->name, status); | 1178 | BT_DBG("%s status 0x%x", hdev->name, status); |
1188 | 1179 | ||
@@ -1203,7 +1194,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | |||
1203 | hci_dev_unlock(hdev); | 1194 | hci_dev_unlock(hdev); |
1204 | } | 1195 | } |
1205 | 1196 | ||
1206 | static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | 1197 | static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) |
1207 | { | 1198 | { |
1208 | struct hci_cp_create_conn *cp; | 1199 | struct hci_cp_create_conn *cp; |
1209 | struct hci_conn *conn; | 1200 | struct hci_conn *conn; |
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) | |||
1333 | } | 1324 | } |
1334 | 1325 | ||
1335 | static int hci_outgoing_auth_needed(struct hci_dev *hdev, | 1326 | static int hci_outgoing_auth_needed(struct hci_dev *hdev, |
1336 | struct hci_conn *conn) | 1327 | struct hci_conn *conn) |
1337 | { | 1328 | { |
1338 | if (conn->state != BT_CONFIG || !conn->out) | 1329 | if (conn->state != BT_CONFIG || !conn->out) |
1339 | return 0; | 1330 | return 0; |
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev, | |||
1343 | 1334 | ||
1344 | /* Only request authentication for SSP connections or non-SSP | 1335 | /* Only request authentication for SSP connections or non-SSP |
1345 | * devices with sec_level HIGH or if MITM protection is requested */ | 1336 | * devices with sec_level HIGH or if MITM protection is requested */ |
1346 | if (!hci_conn_ssp_enabled(conn) && | 1337 | if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && |
1347 | conn->pending_sec_level != BT_SECURITY_HIGH && | 1338 | conn->pending_sec_level != BT_SECURITY_HIGH) |
1348 | !(conn->auth_type & 0x01)) | ||
1349 | return 0; | 1339 | return 0; |
1350 | 1340 | ||
1351 | return 1; | 1341 | return 1; |
1352 | } | 1342 | } |
1353 | 1343 | ||
1354 | static inline int hci_resolve_name(struct hci_dev *hdev, | 1344 | static int hci_resolve_name(struct hci_dev *hdev, |
1355 | struct inquiry_entry *e) | 1345 | struct inquiry_entry *e) |
1356 | { | 1346 | { |
1357 | struct hci_cp_remote_name_req cp; | 1347 | struct hci_cp_remote_name_req cp; |
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) | |||
1638 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); | 1628 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); |
1639 | 1629 | ||
1640 | BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), | 1630 | BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), |
1641 | conn); | 1631 | conn); |
1642 | 1632 | ||
1643 | if (status) { | 1633 | if (status) { |
1644 | if (conn && conn->state == BT_CONNECT) { | 1634 | if (conn && conn->state == BT_CONNECT) { |
@@ -1668,7 +1658,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) | |||
1668 | BT_DBG("%s status 0x%x", hdev->name, status); | 1658 | BT_DBG("%s status 0x%x", hdev->name, status); |
1669 | } | 1659 | } |
1670 | 1660 | ||
1671 | static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1661 | static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1672 | { | 1662 | { |
1673 | __u8 status = *((__u8 *) skb->data); | 1663 | __u8 status = *((__u8 *) skb->data); |
1674 | struct discovery_state *discov = &hdev->discovery; | 1664 | struct discovery_state *discov = &hdev->discovery; |
@@ -1708,7 +1698,7 @@ unlock: | |||
1708 | hci_dev_unlock(hdev); | 1698 | hci_dev_unlock(hdev); |
1709 | } | 1699 | } |
1710 | 1700 | ||
1711 | static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1701 | static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1712 | { | 1702 | { |
1713 | struct inquiry_data data; | 1703 | struct inquiry_data data; |
1714 | struct inquiry_info *info = (void *) (skb->data + 1); | 1704 | struct inquiry_info *info = (void *) (skb->data + 1); |
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * | |||
1745 | hci_dev_unlock(hdev); | 1735 | hci_dev_unlock(hdev); |
1746 | } | 1736 | } |
1747 | 1737 | ||
1748 | static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1738 | static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1749 | { | 1739 | { |
1750 | struct hci_ev_conn_complete *ev = (void *) skb->data; | 1740 | struct hci_ev_conn_complete *ev = (void *) skb->data; |
1751 | struct hci_conn *conn; | 1741 | struct hci_conn *conn; |
@@ -1823,18 +1813,18 @@ unlock: | |||
1823 | hci_conn_check_pending(hdev); | 1813 | hci_conn_check_pending(hdev); |
1824 | } | 1814 | } |
1825 | 1815 | ||
1826 | static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1816 | static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1827 | { | 1817 | { |
1828 | struct hci_ev_conn_request *ev = (void *) skb->data; | 1818 | struct hci_ev_conn_request *ev = (void *) skb->data; |
1829 | int mask = hdev->link_mode; | 1819 | int mask = hdev->link_mode; |
1830 | 1820 | ||
1831 | BT_DBG("%s bdaddr %s type 0x%x", hdev->name, | 1821 | BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr), |
1832 | batostr(&ev->bdaddr), ev->link_type); | 1822 | ev->link_type); |
1833 | 1823 | ||
1834 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); | 1824 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); |
1835 | 1825 | ||
1836 | if ((mask & HCI_LM_ACCEPT) && | 1826 | if ((mask & HCI_LM_ACCEPT) && |
1837 | !hci_blacklist_lookup(hdev, &ev->bdaddr)) { | 1827 | !hci_blacklist_lookup(hdev, &ev->bdaddr)) { |
1838 | /* Connection accepted */ | 1828 | /* Connection accepted */ |
1839 | struct inquiry_entry *ie; | 1829 | struct inquiry_entry *ie; |
1840 | struct hci_conn *conn; | 1830 | struct hci_conn *conn; |
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1845 | if (ie) | 1835 | if (ie) |
1846 | memcpy(ie->data.dev_class, ev->dev_class, 3); | 1836 | memcpy(ie->data.dev_class, ev->dev_class, 3); |
1847 | 1837 | ||
1848 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); | 1838 | conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, |
1839 | &ev->bdaddr); | ||
1849 | if (!conn) { | 1840 | if (!conn) { |
1850 | conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); | 1841 | conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); |
1851 | if (!conn) { | 1842 | if (!conn) { |
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1878 | bacpy(&cp.bdaddr, &ev->bdaddr); | 1869 | bacpy(&cp.bdaddr, &ev->bdaddr); |
1879 | cp.pkt_type = cpu_to_le16(conn->pkt_type); | 1870 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
1880 | 1871 | ||
1881 | cp.tx_bandwidth = cpu_to_le32(0x00001f40); | 1872 | cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); |
1882 | cp.rx_bandwidth = cpu_to_le32(0x00001f40); | 1873 | cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); |
1883 | cp.max_latency = cpu_to_le16(0xffff); | 1874 | cp.max_latency = __constant_cpu_to_le16(0xffff); |
1884 | cp.content_format = cpu_to_le16(hdev->voice_setting); | 1875 | cp.content_format = cpu_to_le16(hdev->voice_setting); |
1885 | cp.retrans_effort = 0xff; | 1876 | cp.retrans_effort = 0xff; |
1886 | 1877 | ||
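In the SCO accept path above, the fixed bandwidth and latency values switch from cpu_to_le16/32() to the __constant_ variants, which are the forms intended for compile-time constant operands; runtime values such as hdev->voice_setting keep the plain helpers. A brief illustrative sketch with made-up struct and function names (not the real hci_cp_* layout):

#include <linux/types.h>
#include <asm/byteorder.h>

struct accept_params {				/* illustrative layout only */
	__le32 tx_bandwidth;
	__le16 max_latency;
	__le16 content_format;
};

static void fill_params(struct accept_params *p, u16 voice_setting)
{
	p->tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);	/* constant operand */
	p->max_latency    = __constant_cpu_to_le16(0xffff);		/* constant operand */
	p->content_format = cpu_to_le16(voice_setting);		/* runtime value */
}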
@@ -1897,7 +1888,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1897 | } | 1888 | } |
1898 | } | 1889 | } |
1899 | 1890 | ||
1900 | static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1891 | static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1901 | { | 1892 | { |
1902 | struct hci_ev_disconn_complete *ev = (void *) skb->data; | 1893 | struct hci_ev_disconn_complete *ev = (void *) skb->data; |
1903 | struct hci_conn *conn; | 1894 | struct hci_conn *conn; |
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
1914 | conn->state = BT_CLOSED; | 1905 | conn->state = BT_CLOSED; |
1915 | 1906 | ||
1916 | if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && | 1907 | if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && |
1917 | (conn->type == ACL_LINK || conn->type == LE_LINK)) { | 1908 | (conn->type == ACL_LINK || conn->type == LE_LINK)) { |
1918 | if (ev->status != 0) | 1909 | if (ev->status != 0) |
1919 | mgmt_disconnect_failed(hdev, &conn->dst, conn->type, | 1910 | mgmt_disconnect_failed(hdev, &conn->dst, conn->type, |
1920 | conn->dst_type, ev->status); | 1911 | conn->dst_type, ev->status); |
1921 | else | 1912 | else |
1922 | mgmt_device_disconnected(hdev, &conn->dst, conn->type, | 1913 | mgmt_device_disconnected(hdev, &conn->dst, conn->type, |
1923 | conn->dst_type); | 1914 | conn->dst_type); |
@@ -1934,7 +1925,7 @@ unlock: | |||
1934 | hci_dev_unlock(hdev); | 1925 | hci_dev_unlock(hdev); |
1935 | } | 1926 | } |
1936 | 1927 | ||
1937 | static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1928 | static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1938 | { | 1929 | { |
1939 | struct hci_ev_auth_complete *ev = (void *) skb->data; | 1930 | struct hci_ev_auth_complete *ev = (void *) skb->data; |
1940 | struct hci_conn *conn; | 1931 | struct hci_conn *conn; |
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1949 | 1940 | ||
1950 | if (!ev->status) { | 1941 | if (!ev->status) { |
1951 | if (!hci_conn_ssp_enabled(conn) && | 1942 | if (!hci_conn_ssp_enabled(conn) && |
1952 | test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { | 1943 | test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { |
1953 | BT_INFO("re-auth of legacy device is not possible."); | 1944 | BT_INFO("re-auth of legacy device is not possible."); |
1954 | } else { | 1945 | } else { |
1955 | conn->link_mode |= HCI_LM_AUTH; | 1946 | conn->link_mode |= HCI_LM_AUTH; |
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1969 | cp.handle = ev->handle; | 1960 | cp.handle = ev->handle; |
1970 | cp.encrypt = 0x01; | 1961 | cp.encrypt = 0x01; |
1971 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), | 1962 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), |
1972 | &cp); | 1963 | &cp); |
1973 | } else { | 1964 | } else { |
1974 | conn->state = BT_CONNECTED; | 1965 | conn->state = BT_CONNECTED; |
1975 | hci_proto_connect_cfm(conn, ev->status); | 1966 | hci_proto_connect_cfm(conn, ev->status); |
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1989 | cp.handle = ev->handle; | 1980 | cp.handle = ev->handle; |
1990 | cp.encrypt = 0x01; | 1981 | cp.encrypt = 0x01; |
1991 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), | 1982 | hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), |
1992 | &cp); | 1983 | &cp); |
1993 | } else { | 1984 | } else { |
1994 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | 1985 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); |
1995 | hci_encrypt_cfm(conn, ev->status, 0x00); | 1986 | hci_encrypt_cfm(conn, ev->status, 0x00); |
@@ -2000,7 +1991,7 @@ unlock: | |||
2000 | hci_dev_unlock(hdev); | 1991 | hci_dev_unlock(hdev); |
2001 | } | 1992 | } |
2002 | 1993 | ||
2003 | static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1994 | static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2004 | { | 1995 | { |
2005 | struct hci_ev_remote_name *ev = (void *) skb->data; | 1996 | struct hci_ev_remote_name *ev = (void *) skb->data; |
2006 | struct hci_conn *conn; | 1997 | struct hci_conn *conn; |
@@ -2039,7 +2030,7 @@ unlock: | |||
2039 | hci_dev_unlock(hdev); | 2030 | hci_dev_unlock(hdev); |
2040 | } | 2031 | } |
2041 | 2032 | ||
2042 | static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2033 | static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2043 | { | 2034 | { |
2044 | struct hci_ev_encrypt_change *ev = (void *) skb->data; | 2035 | struct hci_ev_encrypt_change *ev = (void *) skb->data; |
2045 | struct hci_conn *conn; | 2036 | struct hci_conn *conn; |
@@ -2082,7 +2073,8 @@ unlock: | |||
2082 | hci_dev_unlock(hdev); | 2073 | hci_dev_unlock(hdev); |
2083 | } | 2074 | } |
2084 | 2075 | ||
2085 | static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2076 | static void hci_change_link_key_complete_evt(struct hci_dev *hdev, |
2077 | struct sk_buff *skb) | ||
2086 | { | 2078 | { |
2087 | struct hci_ev_change_link_key_complete *ev = (void *) skb->data; | 2079 | struct hci_ev_change_link_key_complete *ev = (void *) skb->data; |
2088 | struct hci_conn *conn; | 2080 | struct hci_conn *conn; |
@@ -2104,7 +2096,8 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct | |||
2104 | hci_dev_unlock(hdev); | 2096 | hci_dev_unlock(hdev); |
2105 | } | 2097 | } |
2106 | 2098 | ||
2107 | static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2099 | static void hci_remote_features_evt(struct hci_dev *hdev, |
2100 | struct sk_buff *skb) | ||
2108 | { | 2101 | { |
2109 | struct hci_ev_remote_features *ev = (void *) skb->data; | 2102 | struct hci_ev_remote_features *ev = (void *) skb->data; |
2110 | struct hci_conn *conn; | 2103 | struct hci_conn *conn; |
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff | |||
2128 | cp.handle = ev->handle; | 2121 | cp.handle = ev->handle; |
2129 | cp.page = 0x01; | 2122 | cp.page = 0x01; |
2130 | hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, | 2123 | hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, |
2131 | sizeof(cp), &cp); | 2124 | sizeof(cp), &cp); |
2132 | goto unlock; | 2125 | goto unlock; |
2133 | } | 2126 | } |
2134 | 2127 | ||
@@ -2153,17 +2146,18 @@ unlock: | |||
2153 | hci_dev_unlock(hdev); | 2146 | hci_dev_unlock(hdev); |
2154 | } | 2147 | } |
2155 | 2148 | ||
2156 | static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2149 | static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2157 | { | 2150 | { |
2158 | BT_DBG("%s", hdev->name); | 2151 | BT_DBG("%s", hdev->name); |
2159 | } | 2152 | } |
2160 | 2153 | ||
2161 | static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2154 | static void hci_qos_setup_complete_evt(struct hci_dev *hdev, |
2155 | struct sk_buff *skb) | ||
2162 | { | 2156 | { |
2163 | BT_DBG("%s", hdev->name); | 2157 | BT_DBG("%s", hdev->name); |
2164 | } | 2158 | } |
2165 | 2159 | ||
2166 | static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2160 | static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2167 | { | 2161 | { |
2168 | struct hci_ev_cmd_complete *ev = (void *) skb->data; | 2162 | struct hci_ev_cmd_complete *ev = (void *) skb->data; |
2169 | __u16 opcode; | 2163 | __u16 opcode; |
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
2384 | } | 2378 | } |
2385 | } | 2379 | } |
2386 | 2380 | ||
2387 | static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2381 | static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2388 | { | 2382 | { |
2389 | struct hci_ev_cmd_status *ev = (void *) skb->data; | 2383 | struct hci_ev_cmd_status *ev = (void *) skb->data; |
2390 | __u16 opcode; | 2384 | __u16 opcode; |
@@ -2465,7 +2459,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2465 | } | 2459 | } |
2466 | } | 2460 | } |
2467 | 2461 | ||
2468 | static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2462 | static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2469 | { | 2463 | { |
2470 | struct hci_ev_role_change *ev = (void *) skb->data; | 2464 | struct hci_ev_role_change *ev = (void *) skb->data; |
2471 | struct hci_conn *conn; | 2465 | struct hci_conn *conn; |
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
2491 | hci_dev_unlock(hdev); | 2485 | hci_dev_unlock(hdev); |
2492 | } | 2486 | } |
2493 | 2487 | ||
2494 | static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2488 | static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2495 | { | 2489 | { |
2496 | struct hci_ev_num_comp_pkts *ev = (void *) skb->data; | 2490 | struct hci_ev_num_comp_pkts *ev = (void *) skb->data; |
2497 | int i; | 2491 | int i; |
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s | |||
2502 | } | 2496 | } |
2503 | 2497 | ||
2504 | if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + | 2498 | if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + |
2505 | ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { | 2499 | ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { |
2506 | BT_DBG("%s bad parameters", hdev->name); | 2500 | BT_DBG("%s bad parameters", hdev->name); |
2507 | return; | 2501 | return; |
2508 | } | 2502 | } |
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s | |||
2557 | queue_work(hdev->workqueue, &hdev->tx_work); | 2551 | queue_work(hdev->workqueue, &hdev->tx_work); |
2558 | } | 2552 | } |
2559 | 2553 | ||
2560 | static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, | 2554 | static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2561 | struct sk_buff *skb) | ||
2562 | { | 2555 | { |
2563 | struct hci_ev_num_comp_blocks *ev = (void *) skb->data; | 2556 | struct hci_ev_num_comp_blocks *ev = (void *) skb->data; |
2564 | int i; | 2557 | int i; |
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, | |||
2569 | } | 2562 | } |
2570 | 2563 | ||
2571 | if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + | 2564 | if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + |
2572 | ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { | 2565 | ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { |
2573 | BT_DBG("%s bad parameters", hdev->name); | 2566 | BT_DBG("%s bad parameters", hdev->name); |
2574 | return; | 2567 | return; |
2575 | } | 2568 | } |
2576 | 2569 | ||
2577 | BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, | 2570 | BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, |
2578 | ev->num_hndl); | 2571 | ev->num_hndl); |
2579 | 2572 | ||
2580 | for (i = 0; i < ev->num_hndl; i++) { | 2573 | for (i = 0; i < ev->num_hndl; i++) { |
2581 | struct hci_comp_blocks_info *info = &ev->handles[i]; | 2574 | struct hci_comp_blocks_info *info = &ev->handles[i]; |
@@ -2607,7 +2600,7 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, | |||
2607 | queue_work(hdev->workqueue, &hdev->tx_work); | 2600 | queue_work(hdev->workqueue, &hdev->tx_work); |
2608 | } | 2601 | } |
2609 | 2602 | ||
2610 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2603 | static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2611 | { | 2604 | { |
2612 | struct hci_ev_mode_change *ev = (void *) skb->data; | 2605 | struct hci_ev_mode_change *ev = (void *) skb->data; |
2613 | struct hci_conn *conn; | 2606 | struct hci_conn *conn; |
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
2621 | conn->mode = ev->mode; | 2614 | conn->mode = ev->mode; |
2622 | conn->interval = __le16_to_cpu(ev->interval); | 2615 | conn->interval = __le16_to_cpu(ev->interval); |
2623 | 2616 | ||
2624 | if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { | 2617 | if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, |
2618 | &conn->flags)) { | ||
2625 | if (conn->mode == HCI_CM_ACTIVE) | 2619 | if (conn->mode == HCI_CM_ACTIVE) |
2626 | set_bit(HCI_CONN_POWER_SAVE, &conn->flags); | 2620 | set_bit(HCI_CONN_POWER_SAVE, &conn->flags); |
2627 | else | 2621 | else |
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
2635 | hci_dev_unlock(hdev); | 2629 | hci_dev_unlock(hdev); |
2636 | } | 2630 | } |
2637 | 2631 | ||
2638 | static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2632 | static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2639 | { | 2633 | { |
2640 | struct hci_ev_pin_code_req *ev = (void *) skb->data; | 2634 | struct hci_ev_pin_code_req *ev = (void *) skb->data; |
2641 | struct hci_conn *conn; | 2635 | struct hci_conn *conn; |
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2656 | 2650 | ||
2657 | if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) | 2651 | if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) |
2658 | hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, | 2652 | hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, |
2659 | sizeof(ev->bdaddr), &ev->bdaddr); | 2653 | sizeof(ev->bdaddr), &ev->bdaddr); |
2660 | else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { | 2654 | else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { |
2661 | u8 secure; | 2655 | u8 secure; |
2662 | 2656 | ||
@@ -2672,7 +2666,7 @@ unlock: | |||
2672 | hci_dev_unlock(hdev); | 2666 | hci_dev_unlock(hdev); |
2673 | } | 2667 | } |
2674 | 2668 | ||
2675 | static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2669 | static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2676 | { | 2670 | { |
2677 | struct hci_ev_link_key_req *ev = (void *) skb->data; | 2671 | struct hci_ev_link_key_req *ev = (void *) skb->data; |
2678 | struct hci_cp_link_key_reply cp; | 2672 | struct hci_cp_link_key_reply cp; |
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2689 | key = hci_find_link_key(hdev, &ev->bdaddr); | 2683 | key = hci_find_link_key(hdev, &ev->bdaddr); |
2690 | if (!key) { | 2684 | if (!key) { |
2691 | BT_DBG("%s link key not found for %s", hdev->name, | 2685 | BT_DBG("%s link key not found for %s", hdev->name, |
2692 | batostr(&ev->bdaddr)); | 2686 | batostr(&ev->bdaddr)); |
2693 | goto not_found; | 2687 | goto not_found; |
2694 | } | 2688 | } |
2695 | 2689 | ||
2696 | BT_DBG("%s found key type %u for %s", hdev->name, key->type, | 2690 | BT_DBG("%s found key type %u for %s", hdev->name, key->type, |
2697 | batostr(&ev->bdaddr)); | 2691 | batostr(&ev->bdaddr)); |
2698 | 2692 | ||
2699 | if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && | 2693 | if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && |
2700 | key->type == HCI_LK_DEBUG_COMBINATION) { | 2694 | key->type == HCI_LK_DEBUG_COMBINATION) { |
2701 | BT_DBG("%s ignoring debug key", hdev->name); | 2695 | BT_DBG("%s ignoring debug key", hdev->name); |
2702 | goto not_found; | 2696 | goto not_found; |
2703 | } | 2697 | } |
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2705 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | 2699 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); |
2706 | if (conn) { | 2700 | if (conn) { |
2707 | if (key->type == HCI_LK_UNAUTH_COMBINATION && | 2701 | if (key->type == HCI_LK_UNAUTH_COMBINATION && |
2708 | conn->auth_type != 0xff && | 2702 | conn->auth_type != 0xff && (conn->auth_type & 0x01)) { |
2709 | (conn->auth_type & 0x01)) { | ||
2710 | BT_DBG("%s ignoring unauthenticated key", hdev->name); | 2703 | BT_DBG("%s ignoring unauthenticated key", hdev->name); |
2711 | goto not_found; | 2704 | goto not_found; |
2712 | } | 2705 | } |
2713 | 2706 | ||
2714 | if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && | 2707 | if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && |
2715 | conn->pending_sec_level == BT_SECURITY_HIGH) { | 2708 | conn->pending_sec_level == BT_SECURITY_HIGH) { |
2716 | BT_DBG("%s ignoring key unauthenticated for high \ | 2709 | BT_DBG("%s ignoring key unauthenticated for high security", |
2717 | security", hdev->name); | 2710 | hdev->name); |
2718 | goto not_found; | 2711 | goto not_found; |
2719 | } | 2712 | } |
2720 | 2713 | ||
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2723 | } | 2716 | } |
2724 | 2717 | ||
2725 | bacpy(&cp.bdaddr, &ev->bdaddr); | 2718 | bacpy(&cp.bdaddr, &ev->bdaddr); |
2726 | memcpy(cp.link_key, key->val, 16); | 2719 | memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); |
2727 | 2720 | ||
2728 | hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); | 2721 | hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); |
2729 | 2722 | ||
@@ -2736,7 +2729,7 @@ not_found: | |||
2736 | hci_dev_unlock(hdev); | 2729 | hci_dev_unlock(hdev); |
2737 | } | 2730 | } |
2738 | 2731 | ||
2739 | static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2732 | static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2740 | { | 2733 | { |
2741 | struct hci_ev_link_key_notify *ev = (void *) skb->data; | 2734 | struct hci_ev_link_key_notify *ev = (void *) skb->data; |
2742 | struct hci_conn *conn; | 2735 | struct hci_conn *conn; |
@@ -2760,12 +2753,12 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff | |||
2760 | 2753 | ||
2761 | if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) | 2754 | if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) |
2762 | hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, | 2755 | hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, |
2763 | ev->key_type, pin_len); | 2756 | ev->key_type, pin_len); |
2764 | 2757 | ||
2765 | hci_dev_unlock(hdev); | 2758 | hci_dev_unlock(hdev); |
2766 | } | 2759 | } |
2767 | 2760 | ||
2768 | static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2761 | static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2769 | { | 2762 | { |
2770 | struct hci_ev_clock_offset *ev = (void *) skb->data; | 2763 | struct hci_ev_clock_offset *ev = (void *) skb->data; |
2771 | struct hci_conn *conn; | 2764 | struct hci_conn *conn; |
@@ -2788,7 +2781,7 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
2788 | hci_dev_unlock(hdev); | 2781 | hci_dev_unlock(hdev); |
2789 | } | 2782 | } |
2790 | 2783 | ||
2791 | static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2784 | static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2792 | { | 2785 | { |
2793 | struct hci_ev_pkt_type_change *ev = (void *) skb->data; | 2786 | struct hci_ev_pkt_type_change *ev = (void *) skb->data; |
2794 | struct hci_conn *conn; | 2787 | struct hci_conn *conn; |
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff | |||
2804 | hci_dev_unlock(hdev); | 2797 | hci_dev_unlock(hdev); |
2805 | } | 2798 | } |
2806 | 2799 | ||
2807 | static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2800 | static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2808 | { | 2801 | { |
2809 | struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; | 2802 | struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; |
2810 | struct inquiry_entry *ie; | 2803 | struct inquiry_entry *ie; |
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff * | |||
2822 | hci_dev_unlock(hdev); | 2815 | hci_dev_unlock(hdev); |
2823 | } | 2816 | } |
2824 | 2817 | ||
2825 | static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2818 | static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, |
2819 | struct sk_buff *skb) | ||
2826 | { | 2820 | { |
2827 | struct inquiry_data data; | 2821 | struct inquiry_data data; |
2828 | int num_rsp = *((__u8 *) skb->data); | 2822 | int num_rsp = *((__u8 *) skb->data); |
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
2881 | hci_dev_unlock(hdev); | 2875 | hci_dev_unlock(hdev); |
2882 | } | 2876 | } |
2883 | 2877 | ||
2884 | static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2878 | static void hci_remote_ext_features_evt(struct hci_dev *hdev, |
2879 | struct sk_buff *skb) | ||
2885 | { | 2880 | { |
2886 | struct hci_ev_remote_ext_features *ev = (void *) skb->data; | 2881 | struct hci_ev_remote_ext_features *ev = (void *) skb->data; |
2887 | struct hci_conn *conn; | 2882 | struct hci_conn *conn; |
@@ -2929,7 +2924,8 @@ unlock: | |||
2929 | hci_dev_unlock(hdev); | 2924 | hci_dev_unlock(hdev); |
2930 | } | 2925 | } |
2931 | 2926 | ||
2932 | static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2927 | static void hci_sync_conn_complete_evt(struct hci_dev *hdev, |
2928 | struct sk_buff *skb) | ||
2933 | { | 2929 | { |
2934 | struct hci_ev_sync_conn_complete *ev = (void *) skb->data; | 2930 | struct hci_ev_sync_conn_complete *ev = (void *) skb->data; |
2935 | struct hci_conn *conn; | 2931 | struct hci_conn *conn; |
@@ -2984,19 +2980,20 @@ unlock: | |||
2984 | hci_dev_unlock(hdev); | 2980 | hci_dev_unlock(hdev); |
2985 | } | 2981 | } |
2986 | 2982 | ||
2987 | static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2983 | static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2988 | { | 2984 | { |
2989 | BT_DBG("%s", hdev->name); | 2985 | BT_DBG("%s", hdev->name); |
2990 | } | 2986 | } |
2991 | 2987 | ||
2992 | static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2988 | static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2993 | { | 2989 | { |
2994 | struct hci_ev_sniff_subrate *ev = (void *) skb->data; | 2990 | struct hci_ev_sniff_subrate *ev = (void *) skb->data; |
2995 | 2991 | ||
2996 | BT_DBG("%s status %d", hdev->name, ev->status); | 2992 | BT_DBG("%s status %d", hdev->name, ev->status); |
2997 | } | 2993 | } |
2998 | 2994 | ||
2999 | static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2995 | static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, |
2996 | struct sk_buff *skb) | ||
3000 | { | 2997 | { |
3001 | struct inquiry_data data; | 2998 | struct inquiry_data data; |
3002 | struct extended_inquiry_info *info = (void *) (skb->data + 1); | 2999 | struct extended_inquiry_info *info = (void *) (skb->data + 1); |
@@ -3043,7 +3040,51 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
3043 | hci_dev_unlock(hdev); | 3040 | hci_dev_unlock(hdev); |
3044 | } | 3041 | } |
3045 | 3042 | ||
3046 | static inline u8 hci_get_auth_req(struct hci_conn *conn) | 3043 | static void hci_key_refresh_complete_evt(struct hci_dev *hdev, |
3044 | struct sk_buff *skb) | ||
3045 | { | ||
3046 | struct hci_ev_key_refresh_complete *ev = (void *) skb->data; | ||
3047 | struct hci_conn *conn; | ||
3048 | |||
3049 | BT_DBG("%s status %u handle %u", hdev->name, ev->status, | ||
3050 | __le16_to_cpu(ev->handle)); | ||
3051 | |||
3052 | hci_dev_lock(hdev); | ||
3053 | |||
3054 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | ||
3055 | if (!conn) | ||
3056 | goto unlock; | ||
3057 | |||
3058 | if (!ev->status) | ||
3059 | conn->sec_level = conn->pending_sec_level; | ||
3060 | |||
3061 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | ||
3062 | |||
3063 | if (ev->status && conn->state == BT_CONNECTED) { | ||
3064 | hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); | ||
3065 | hci_conn_put(conn); | ||
3066 | goto unlock; | ||
3067 | } | ||
3068 | |||
3069 | if (conn->state == BT_CONFIG) { | ||
3070 | if (!ev->status) | ||
3071 | conn->state = BT_CONNECTED; | ||
3072 | |||
3073 | hci_proto_connect_cfm(conn, ev->status); | ||
3074 | hci_conn_put(conn); | ||
3075 | } else { | ||
3076 | hci_auth_cfm(conn, ev->status); | ||
3077 | |||
3078 | hci_conn_hold(conn); | ||
3079 | conn->disc_timeout = HCI_DISCONN_TIMEOUT; | ||
3080 | hci_conn_put(conn); | ||
3081 | } | ||
3082 | |||
3083 | unlock: | ||
3084 | hci_dev_unlock(hdev); | ||
3085 | } | ||
3086 | |||
3087 | static u8 hci_get_auth_req(struct hci_conn *conn) | ||
3047 | { | 3088 | { |
3048 | /* If remote requests dedicated bonding follow that lead */ | 3089 | /* If remote requests dedicated bonding follow that lead */ |
3049 | if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { | 3090 | if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { |
@@ -3062,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn) | |||
3062 | return conn->auth_type; | 3103 | return conn->auth_type; |
3063 | } | 3104 | } |
3064 | 3105 | ||
3065 | static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3106 | static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3066 | { | 3107 | { |
3067 | struct hci_ev_io_capa_request *ev = (void *) skb->data; | 3108 | struct hci_ev_io_capa_request *ev = (void *) skb->data; |
3068 | struct hci_conn *conn; | 3109 | struct hci_conn *conn; |
@@ -3081,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff | |||
3081 | goto unlock; | 3122 | goto unlock; |
3082 | 3123 | ||
3083 | if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || | 3124 | if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || |
3084 | (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { | 3125 | (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { |
3085 | struct hci_cp_io_capability_reply cp; | 3126 | struct hci_cp_io_capability_reply cp; |
3086 | 3127 | ||
3087 | bacpy(&cp.bdaddr, &ev->bdaddr); | 3128 | bacpy(&cp.bdaddr, &ev->bdaddr); |
@@ -3092,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff | |||
3092 | conn->auth_type = hci_get_auth_req(conn); | 3133 | conn->auth_type = hci_get_auth_req(conn); |
3093 | cp.authentication = conn->auth_type; | 3134 | cp.authentication = conn->auth_type; |
3094 | 3135 | ||
3095 | if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && | 3136 | if (hci_find_remote_oob_data(hdev, &conn->dst) && |
3096 | hci_find_remote_oob_data(hdev, &conn->dst)) | 3137 | (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) |
3097 | cp.oob_data = 0x01; | 3138 | cp.oob_data = 0x01; |
3098 | else | 3139 | else |
3099 | cp.oob_data = 0x00; | 3140 | cp.oob_data = 0x00; |
3100 | 3141 | ||
3101 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, | 3142 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, |
3102 | sizeof(cp), &cp); | 3143 | sizeof(cp), &cp); |
3103 | } else { | 3144 | } else { |
3104 | struct hci_cp_io_capability_neg_reply cp; | 3145 | struct hci_cp_io_capability_neg_reply cp; |
3105 | 3146 | ||
@@ -3107,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff | |||
3107 | cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; | 3148 | cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; |
3108 | 3149 | ||
3109 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, | 3150 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, |
3110 | sizeof(cp), &cp); | 3151 | sizeof(cp), &cp); |
3111 | } | 3152 | } |
3112 | 3153 | ||
3113 | unlock: | 3154 | unlock: |
3114 | hci_dev_unlock(hdev); | 3155 | hci_dev_unlock(hdev); |
3115 | } | 3156 | } |
3116 | 3157 | ||
3117 | static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3158 | static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3118 | { | 3159 | { |
3119 | struct hci_ev_io_capa_reply *ev = (void *) skb->data; | 3160 | struct hci_ev_io_capa_reply *ev = (void *) skb->data; |
3120 | struct hci_conn *conn; | 3161 | struct hci_conn *conn; |
@@ -3136,8 +3177,8 @@ unlock: | |||
3136 | hci_dev_unlock(hdev); | 3177 | hci_dev_unlock(hdev); |
3137 | } | 3178 | } |
3138 | 3179 | ||
3139 | static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, | 3180 | static void hci_user_confirm_request_evt(struct hci_dev *hdev, |
3140 | struct sk_buff *skb) | 3181 | struct sk_buff *skb) |
3141 | { | 3182 | { |
3142 | struct hci_ev_user_confirm_req *ev = (void *) skb->data; | 3183 | struct hci_ev_user_confirm_req *ev = (void *) skb->data; |
3143 | int loc_mitm, rem_mitm, confirm_hint = 0; | 3184 | int loc_mitm, rem_mitm, confirm_hint = 0; |
@@ -3165,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, | |||
3165 | if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { | 3206 | if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { |
3166 | BT_DBG("Rejecting request: remote device can't provide MITM"); | 3207 | BT_DBG("Rejecting request: remote device can't provide MITM"); |
3167 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, | 3208 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, |
3168 | sizeof(ev->bdaddr), &ev->bdaddr); | 3209 | sizeof(ev->bdaddr), &ev->bdaddr); |
3169 | goto unlock; | 3210 | goto unlock; |
3170 | } | 3211 | } |
3171 | 3212 | ||
3172 | /* If no side requires MITM protection; auto-accept */ | 3213 | /* If no side requires MITM protection; auto-accept */ |
3173 | if ((!loc_mitm || conn->remote_cap == 0x03) && | 3214 | if ((!loc_mitm || conn->remote_cap == 0x03) && |
3174 | (!rem_mitm || conn->io_capability == 0x03)) { | 3215 | (!rem_mitm || conn->io_capability == 0x03)) { |
3175 | 3216 | ||
3176 | /* If we're not the initiators request authorization to | 3217 | /* If we're not the initiators request authorization to |
3177 | * proceed from user space (mgmt_user_confirm with | 3218 | * proceed from user space (mgmt_user_confirm with |
@@ -3183,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, | |||
3183 | } | 3224 | } |
3184 | 3225 | ||
3185 | BT_DBG("Auto-accept of user confirmation with %ums delay", | 3226 | BT_DBG("Auto-accept of user confirmation with %ums delay", |
3186 | hdev->auto_accept_delay); | 3227 | hdev->auto_accept_delay); |
3187 | 3228 | ||
3188 | if (hdev->auto_accept_delay > 0) { | 3229 | if (hdev->auto_accept_delay > 0) { |
3189 | int delay = msecs_to_jiffies(hdev->auto_accept_delay); | 3230 | int delay = msecs_to_jiffies(hdev->auto_accept_delay); |
@@ -3192,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, | |||
3192 | } | 3233 | } |
3193 | 3234 | ||
3194 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, | 3235 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, |
3195 | sizeof(ev->bdaddr), &ev->bdaddr); | 3236 | sizeof(ev->bdaddr), &ev->bdaddr); |
3196 | goto unlock; | 3237 | goto unlock; |
3197 | } | 3238 | } |
3198 | 3239 | ||
@@ -3204,8 +3245,8 @@ unlock: | |||
3204 | hci_dev_unlock(hdev); | 3245 | hci_dev_unlock(hdev); |
3205 | } | 3246 | } |
3206 | 3247 | ||
3207 | static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, | 3248 | static void hci_user_passkey_request_evt(struct hci_dev *hdev, |
3208 | struct sk_buff *skb) | 3249 | struct sk_buff *skb) |
3209 | { | 3250 | { |
3210 | struct hci_ev_user_passkey_req *ev = (void *) skb->data; | 3251 | struct hci_ev_user_passkey_req *ev = (void *) skb->data; |
3211 | 3252 | ||
@@ -3219,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, | |||
3219 | hci_dev_unlock(hdev); | 3260 | hci_dev_unlock(hdev); |
3220 | } | 3261 | } |
3221 | 3262 | ||
3222 | static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3263 | static void hci_simple_pair_complete_evt(struct hci_dev *hdev, |
3264 | struct sk_buff *skb) | ||
3223 | { | 3265 | { |
3224 | struct hci_ev_simple_pair_complete *ev = (void *) skb->data; | 3266 | struct hci_ev_simple_pair_complete *ev = (void *) skb->data; |
3225 | struct hci_conn *conn; | 3267 | struct hci_conn *conn; |
@@ -3247,7 +3289,8 @@ unlock: | |||
3247 | hci_dev_unlock(hdev); | 3289 | hci_dev_unlock(hdev); |
3248 | } | 3290 | } |
3249 | 3291 | ||
3250 | static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3292 | static void hci_remote_host_features_evt(struct hci_dev *hdev, |
3293 | struct sk_buff *skb) | ||
3251 | { | 3294 | { |
3252 | struct hci_ev_remote_host_features *ev = (void *) skb->data; | 3295 | struct hci_ev_remote_host_features *ev = (void *) skb->data; |
3253 | struct inquiry_entry *ie; | 3296 | struct inquiry_entry *ie; |
@@ -3263,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_ | |||
3263 | hci_dev_unlock(hdev); | 3306 | hci_dev_unlock(hdev); |
3264 | } | 3307 | } |
3265 | 3308 | ||
3266 | static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, | 3309 | static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, |
3267 | struct sk_buff *skb) | 3310 | struct sk_buff *skb) |
3268 | { | 3311 | { |
3269 | struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; | 3312 | struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; |
3270 | struct oob_data *data; | 3313 | struct oob_data *data; |
@@ -3285,20 +3328,20 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, | |||
3285 | memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); | 3328 | memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); |
3286 | 3329 | ||
3287 | hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), | 3330 | hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), |
3288 | &cp); | 3331 | &cp); |
3289 | } else { | 3332 | } else { |
3290 | struct hci_cp_remote_oob_data_neg_reply cp; | 3333 | struct hci_cp_remote_oob_data_neg_reply cp; |
3291 | 3334 | ||
3292 | bacpy(&cp.bdaddr, &ev->bdaddr); | 3335 | bacpy(&cp.bdaddr, &ev->bdaddr); |
3293 | hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), | 3336 | hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), |
3294 | &cp); | 3337 | &cp); |
3295 | } | 3338 | } |
3296 | 3339 | ||
3297 | unlock: | 3340 | unlock: |
3298 | hci_dev_unlock(hdev); | 3341 | hci_dev_unlock(hdev); |
3299 | } | 3342 | } |
3300 | 3343 | ||
3301 | static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3344 | static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3302 | { | 3345 | { |
3303 | struct hci_ev_le_conn_complete *ev = (void *) skb->data; | 3346 | struct hci_ev_le_conn_complete *ev = (void *) skb->data; |
3304 | struct hci_conn *conn; | 3347 | struct hci_conn *conn; |
@@ -3307,6 +3350,19 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
3307 | 3350 | ||
3308 | hci_dev_lock(hdev); | 3351 | hci_dev_lock(hdev); |
3309 | 3352 | ||
3353 | if (ev->status) { | ||
3354 | conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); | ||
3355 | if (!conn) | ||
3356 | goto unlock; | ||
3357 | |||
3358 | mgmt_connect_failed(hdev, &conn->dst, conn->type, | ||
3359 | conn->dst_type, ev->status); | ||
3360 | hci_proto_connect_cfm(conn, ev->status); | ||
3361 | conn->state = BT_CLOSED; | ||
3362 | hci_conn_del(conn); | ||
3363 | goto unlock; | ||
3364 | } | ||
3365 | |||
3310 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); | 3366 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); |
3311 | if (!conn) { | 3367 | if (!conn) { |
3312 | conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); | 3368 | conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); |
@@ -3319,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
3319 | conn->dst_type = ev->bdaddr_type; | 3375 | conn->dst_type = ev->bdaddr_type; |
3320 | } | 3376 | } |
3321 | 3377 | ||
3322 | if (ev->status) { | ||
3323 | mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, | ||
3324 | conn->dst_type, ev->status); | ||
3325 | hci_proto_connect_cfm(conn, ev->status); | ||
3326 | conn->state = BT_CLOSED; | ||
3327 | hci_conn_del(conn); | ||
3328 | goto unlock; | ||
3329 | } | ||
3330 | |||
3331 | if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) | 3378 | if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) |
3332 | mgmt_device_connected(hdev, &ev->bdaddr, conn->type, | 3379 | mgmt_device_connected(hdev, &ev->bdaddr, conn->type, |
3333 | conn->dst_type, 0, NULL, 0, NULL); | 3380 | conn->dst_type, 0, NULL, 0, NULL); |
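The hunk above moves the failure handling for an LE Connection Complete event ahead of the lookup by peer address: when ev->status is non-zero the pending connection is found by its BT_CONNECT state, the failure is reported through mgmt_connect_failed(), and the connection object is torn down without ever being added for a bad address. A condensed sketch of that failure branch, using only calls that appear in the hunk (the standalone helper form and its name are illustrative assumptions; the hdev lock is assumed to be held by the caller):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Sketch: tear-down path for a failed LE connection attempt. */
static void le_conn_failed_sketch(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	/* A failed event may not carry a usable peer address, so locate
	 * the pending LE connection by state instead of by bdaddr. */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		return;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);
	hci_proto_connect_cfm(conn, status);
	conn->state = BT_CLOSED;
	hci_conn_del(conn);
}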
@@ -3345,8 +3392,7 @@ unlock: | |||
3345 | hci_dev_unlock(hdev); | 3392 | hci_dev_unlock(hdev); |
3346 | } | 3393 | } |
3347 | 3394 | ||
3348 | static inline void hci_le_adv_report_evt(struct hci_dev *hdev, | 3395 | static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3349 | struct sk_buff *skb) | ||
3350 | { | 3396 | { |
3351 | u8 num_reports = skb->data[0]; | 3397 | u8 num_reports = skb->data[0]; |
3352 | void *ptr = &skb->data[1]; | 3398 | void *ptr = &skb->data[1]; |
@@ -3367,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev, | |||
3367 | hci_dev_unlock(hdev); | 3413 | hci_dev_unlock(hdev); |
3368 | } | 3414 | } |
3369 | 3415 | ||
3370 | static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, | 3416 | static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3371 | struct sk_buff *skb) | ||
3372 | { | 3417 | { |
3373 | struct hci_ev_le_ltk_req *ev = (void *) skb->data; | 3418 | struct hci_ev_le_ltk_req *ev = (void *) skb->data; |
3374 | struct hci_cp_le_ltk_reply cp; | 3419 | struct hci_cp_le_ltk_reply cp; |
@@ -3411,7 +3456,7 @@ not_found: | |||
3411 | hci_dev_unlock(hdev); | 3456 | hci_dev_unlock(hdev); |
3412 | } | 3457 | } |
3413 | 3458 | ||
3414 | static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3459 | static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3415 | { | 3460 | { |
3416 | struct hci_ev_le_meta *le_ev = (void *) skb->data; | 3461 | struct hci_ev_le_meta *le_ev = (void *) skb->data; |
3417 | 3462 | ||
@@ -3559,6 +3604,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
3559 | hci_extended_inquiry_result_evt(hdev, skb); | 3604 | hci_extended_inquiry_result_evt(hdev, skb); |
3560 | break; | 3605 | break; |
3561 | 3606 | ||
3607 | case HCI_EV_KEY_REFRESH_COMPLETE: | ||
3608 | hci_key_refresh_complete_evt(hdev, skb); | ||
3609 | break; | ||
3610 | |||
3562 | case HCI_EV_IO_CAPA_REQUEST: | 3611 | case HCI_EV_IO_CAPA_REQUEST: |
3563 | hci_io_capa_request_evt(hdev, skb); | 3612 | hci_io_capa_request_evt(hdev, skb); |
3564 | break; | 3613 | break; |
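The new hci_key_refresh_complete_evt() handler is dispatched from hci_event_packet() via the added HCI_EV_KEY_REFRESH_COMPLETE case. It treats an Encryption Key Refresh Complete event much like an encryption change: on success the pending security level is committed, on failure an established link is disconnected with HCI_ERROR_AUTH_FAILURE, and a connection still in BT_CONFIG gets its connect confirmation. The only event parameters it reads are a status byte and a little-endian connection handle; the sketch below shows that assumed layout and the lookup it drives (the structure definition is an assumption modelled on the other event structures; the authoritative one lives in the HCI header):

/* Assumed wire layout of the Encryption Key Refresh Complete event. */
struct hci_ev_key_refresh_complete {
	__u8	status;
	__le16	handle;
} __packed;

/* Sketch: resolve the event to its connection, as the handler does. */
static struct hci_conn *key_refresh_lookup_sketch(struct hci_dev *hdev,
						  struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;

	return hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
}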
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 5914623f426a..a7f04de03d79 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -24,25 +24,7 @@ | |||
24 | 24 | ||
25 | /* Bluetooth HCI sockets. */ | 25 | /* Bluetooth HCI sockets. */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/export.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/capability.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/poll.h> | ||
35 | #include <linux/fcntl.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <linux/workqueue.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/compat.h> | ||
41 | #include <linux/socket.h> | ||
42 | #include <linux/ioctl.h> | ||
43 | #include <net/sock.h> | ||
44 | |||
45 | #include <linux/uaccess.h> | ||
46 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
47 | 29 | ||
48 | #include <net/bluetooth/bluetooth.h> | 30 | #include <net/bluetooth/bluetooth.h> |
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | |||
113 | flt = &hci_pi(sk)->filter; | 95 | flt = &hci_pi(sk)->filter; |
114 | 96 | ||
115 | if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? | 97 | if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? |
116 | 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) | 98 | 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), |
99 | &flt->type_mask)) | ||
117 | continue; | 100 | continue; |
118 | 101 | ||
119 | if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { | 102 | if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { |
120 | register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); | 103 | int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); |
121 | 104 | ||
122 | if (!hci_test_bit(evt, &flt->event_mask)) | 105 | if (!hci_test_bit(evt, &flt->event_mask)) |
123 | continue; | 106 | continue; |
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) | |||
240 | struct hci_mon_hdr *hdr; | 223 | struct hci_mon_hdr *hdr; |
241 | 224 | ||
242 | /* Create a private copy with headroom */ | 225 | /* Create a private copy with headroom */ |
243 | skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); | 226 | skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, |
227 | GFP_ATOMIC); | ||
244 | if (!skb_copy) | 228 | if (!skb_copy) |
245 | continue; | 229 | continue; |
246 | 230 | ||
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) | |||
495 | } | 479 | } |
496 | 480 | ||
497 | /* Ioctls that require bound socket */ | 481 | /* Ioctls that require bound socket */ |
498 | static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) | 482 | static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, |
483 | unsigned long arg) | ||
499 | { | 484 | { |
500 | struct hci_dev *hdev = hci_pi(sk)->hdev; | 485 | struct hci_dev *hdev = hci_pi(sk)->hdev; |
501 | 486 | ||
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign | |||
540 | } | 525 | } |
541 | } | 526 | } |
542 | 527 | ||
543 | static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 528 | static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, |
529 | unsigned long arg) | ||
544 | { | 530 | { |
545 | struct sock *sk = sock->sk; | 531 | struct sock *sk = sock->sk; |
546 | void __user *argp = (void __user *) arg; | 532 | void __user *argp = (void __user *) arg; |
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a | |||
601 | } | 587 | } |
602 | } | 588 | } |
603 | 589 | ||
604 | static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | 590 | static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, |
591 | int addr_len) | ||
605 | { | 592 | { |
606 | struct sockaddr_hci haddr; | 593 | struct sockaddr_hci haddr; |
607 | struct sock *sk = sock->sk; | 594 | struct sock *sk = sock->sk; |
@@ -690,7 +677,8 @@ done: | |||
690 | return err; | 677 | return err; |
691 | } | 678 | } |
692 | 679 | ||
693 | static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) | 680 | static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, |
681 | int *addr_len, int peer) | ||
694 | { | 682 | { |
695 | struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; | 683 | struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; |
696 | struct sock *sk = sock->sk; | 684 | struct sock *sk = sock->sk; |
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add | |||
711 | return 0; | 699 | return 0; |
712 | } | 700 | } |
713 | 701 | ||
714 | static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | 702 | static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, |
703 | struct sk_buff *skb) | ||
715 | { | 704 | { |
716 | __u32 mask = hci_pi(sk)->cmsg_mask; | 705 | __u32 mask = hci_pi(sk)->cmsg_mask; |
717 | 706 | ||
718 | if (mask & HCI_CMSG_DIR) { | 707 | if (mask & HCI_CMSG_DIR) { |
719 | int incoming = bt_cb(skb)->incoming; | 708 | int incoming = bt_cb(skb)->incoming; |
720 | put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); | 709 | put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), |
710 | &incoming); | ||
721 | } | 711 | } |
722 | 712 | ||
723 | if (mask & HCI_CMSG_TSTAMP) { | 713 | if (mask & HCI_CMSG_TSTAMP) { |
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_ | |||
747 | } | 737 | } |
748 | 738 | ||
749 | static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | 739 | static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
750 | struct msghdr *msg, size_t len, int flags) | 740 | struct msghdr *msg, size_t len, int flags) |
751 | { | 741 | { |
752 | int noblock = flags & MSG_DONTWAIT; | 742 | int noblock = flags & MSG_DONTWAIT; |
753 | struct sock *sk = sock->sk; | 743 | struct sock *sk = sock->sk; |
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
857 | u16 ocf = hci_opcode_ocf(opcode); | 847 | u16 ocf = hci_opcode_ocf(opcode); |
858 | 848 | ||
859 | if (((ogf > HCI_SFLT_MAX_OGF) || | 849 | if (((ogf > HCI_SFLT_MAX_OGF) || |
860 | !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && | 850 | !hci_test_bit(ocf & HCI_FLT_OCF_BITS, |
861 | !capable(CAP_NET_RAW)) { | 851 | &hci_sec_filter.ocf_mask[ogf])) && |
852 | !capable(CAP_NET_RAW)) { | ||
862 | err = -EPERM; | 853 | err = -EPERM; |
863 | goto drop; | 854 | goto drop; |
864 | } | 855 | } |
@@ -891,7 +882,8 @@ drop: | |||
891 | goto done; | 882 | goto done; |
892 | } | 883 | } |
893 | 884 | ||
894 | static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) | 885 | static int hci_sock_setsockopt(struct socket *sock, int level, int optname, |
886 | char __user *optval, unsigned int len) | ||
895 | { | 887 | { |
896 | struct hci_ufilter uf = { .opcode = 0 }; | 888 | struct hci_ufilter uf = { .opcode = 0 }; |
897 | struct sock *sk = sock->sk; | 889 | struct sock *sk = sock->sk; |
@@ -973,7 +965,8 @@ done: | |||
973 | return err; | 965 | return err; |
974 | } | 966 | } |
975 | 967 | ||
976 | static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) | 968 | static int hci_sock_getsockopt(struct socket *sock, int level, int optname, |
969 | char __user *optval, int __user *optlen) | ||
977 | { | 970 | { |
978 | struct hci_ufilter uf; | 971 | struct hci_ufilter uf; |
979 | struct sock *sk = sock->sk; | 972 | struct sock *sk = sock->sk; |
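The hci_sock.c changes are mechanical: the long linux/* include list collapses to <linux/export.h>, the register storage-class hint is dropped from a local variable, and statements that ran past 80 columns are re-wrapped. The per-socket filtering they touch is unchanged; its delivery test condenses to the sketch below (the helper name and bool form are illustrative, the field accesses mirror the code above):

/* Sketch: does this socket's filter accept the packet? */
static bool hci_filter_accepts_sketch(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt = &hci_pi(sk)->filter;
	int bit;

	/* Vendor packets map to bit 0, other packets to their type bits. */
	bit = (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
		0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS);

	if (!test_bit(bit, &flt->type_mask))
		return false;

	if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
		int evt = *(__u8 *) skb->data & HCI_FLT_EVENT_BITS;

		if (!hci_test_bit(evt, &flt->event_mask))
			return false;
	}

	return true;
}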
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 937f3187eafa..a20e61c3653d 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -1,10 +1,6 @@ | |||
1 | /* Bluetooth HCI driver model support. */ | 1 | /* Bluetooth HCI driver model support. */ |
2 | 2 | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/slab.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <linux/debugfs.h> | 3 | #include <linux/debugfs.h> |
7 | #include <linux/seq_file.h> | ||
8 | #include <linux/module.h> | 4 | #include <linux/module.h> |
9 | 5 | ||
10 | #include <net/bluetooth/bluetooth.h> | 6 | #include <net/bluetooth/bluetooth.h> |
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type) | |||
31 | } | 27 | } |
32 | } | 28 | } |
33 | 29 | ||
34 | static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) | 30 | static ssize_t show_link_type(struct device *dev, |
31 | struct device_attribute *attr, char *buf) | ||
35 | { | 32 | { |
36 | struct hci_conn *conn = to_hci_conn(dev); | 33 | struct hci_conn *conn = to_hci_conn(dev); |
37 | return sprintf(buf, "%s\n", link_typetostr(conn->type)); | 34 | return sprintf(buf, "%s\n", link_typetostr(conn->type)); |
38 | } | 35 | } |
39 | 36 | ||
40 | static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) | 37 | static ssize_t show_link_address(struct device *dev, |
38 | struct device_attribute *attr, char *buf) | ||
41 | { | 39 | { |
42 | struct hci_conn *conn = to_hci_conn(dev); | 40 | struct hci_conn *conn = to_hci_conn(dev); |
43 | return sprintf(buf, "%s\n", batostr(&conn->dst)); | 41 | return sprintf(buf, "%s\n", batostr(&conn->dst)); |
44 | } | 42 | } |
45 | 43 | ||
46 | static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) | 44 | static ssize_t show_link_features(struct device *dev, |
45 | struct device_attribute *attr, char *buf) | ||
47 | { | 46 | { |
48 | struct hci_conn *conn = to_hci_conn(dev); | 47 | struct hci_conn *conn = to_hci_conn(dev); |
49 | 48 | ||
50 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | 49 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", |
51 | conn->features[0], conn->features[1], | 50 | conn->features[0], conn->features[1], |
52 | conn->features[2], conn->features[3], | 51 | conn->features[2], conn->features[3], |
53 | conn->features[4], conn->features[5], | 52 | conn->features[4], conn->features[5], |
54 | conn->features[6], conn->features[7]); | 53 | conn->features[6], conn->features[7]); |
55 | } | 54 | } |
56 | 55 | ||
57 | #define LINK_ATTR(_name, _mode, _show, _store) \ | 56 | #define LINK_ATTR(_name, _mode, _show, _store) \ |
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type) | |||
185 | } | 184 | } |
186 | } | 185 | } |
187 | 186 | ||
188 | static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) | 187 | static ssize_t show_bus(struct device *dev, |
188 | struct device_attribute *attr, char *buf) | ||
189 | { | 189 | { |
190 | struct hci_dev *hdev = to_hci_dev(dev); | 190 | struct hci_dev *hdev = to_hci_dev(dev); |
191 | return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); | 191 | return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); |
192 | } | 192 | } |
193 | 193 | ||
194 | static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) | 194 | static ssize_t show_type(struct device *dev, |
195 | struct device_attribute *attr, char *buf) | ||
195 | { | 196 | { |
196 | struct hci_dev *hdev = to_hci_dev(dev); | 197 | struct hci_dev *hdev = to_hci_dev(dev); |
197 | return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); | 198 | return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); |
198 | } | 199 | } |
199 | 200 | ||
200 | static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) | 201 | static ssize_t show_name(struct device *dev, |
202 | struct device_attribute *attr, char *buf) | ||
201 | { | 203 | { |
202 | struct hci_dev *hdev = to_hci_dev(dev); | 204 | struct hci_dev *hdev = to_hci_dev(dev); |
203 | char name[HCI_MAX_NAME_LENGTH + 1]; | 205 | char name[HCI_MAX_NAME_LENGTH + 1]; |
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char | |||
210 | return sprintf(buf, "%s\n", name); | 212 | return sprintf(buf, "%s\n", name); |
211 | } | 213 | } |
212 | 214 | ||
213 | static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) | 215 | static ssize_t show_class(struct device *dev, |
216 | struct device_attribute *attr, char *buf) | ||
214 | { | 217 | { |
215 | struct hci_dev *hdev = to_hci_dev(dev); | 218 | struct hci_dev *hdev = to_hci_dev(dev); |
216 | return sprintf(buf, "0x%.2x%.2x%.2x\n", | 219 | return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], |
217 | hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); | 220 | hdev->dev_class[1], hdev->dev_class[0]); |
218 | } | 221 | } |
219 | 222 | ||
220 | static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) | 223 | static ssize_t show_address(struct device *dev, |
224 | struct device_attribute *attr, char *buf) | ||
221 | { | 225 | { |
222 | struct hci_dev *hdev = to_hci_dev(dev); | 226 | struct hci_dev *hdev = to_hci_dev(dev); |
223 | return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); | 227 | return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); |
224 | } | 228 | } |
225 | 229 | ||
226 | static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) | 230 | static ssize_t show_features(struct device *dev, |
231 | struct device_attribute *attr, char *buf) | ||
227 | { | 232 | { |
228 | struct hci_dev *hdev = to_hci_dev(dev); | 233 | struct hci_dev *hdev = to_hci_dev(dev); |
229 | 234 | ||
230 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | 235 | return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", |
231 | hdev->features[0], hdev->features[1], | 236 | hdev->features[0], hdev->features[1], |
232 | hdev->features[2], hdev->features[3], | 237 | hdev->features[2], hdev->features[3], |
233 | hdev->features[4], hdev->features[5], | 238 | hdev->features[4], hdev->features[5], |
234 | hdev->features[6], hdev->features[7]); | 239 | hdev->features[6], hdev->features[7]); |
235 | } | 240 | } |
236 | 241 | ||
237 | static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) | 242 | static ssize_t show_manufacturer(struct device *dev, |
243 | struct device_attribute *attr, char *buf) | ||
238 | { | 244 | { |
239 | struct hci_dev *hdev = to_hci_dev(dev); | 245 | struct hci_dev *hdev = to_hci_dev(dev); |
240 | return sprintf(buf, "%d\n", hdev->manufacturer); | 246 | return sprintf(buf, "%d\n", hdev->manufacturer); |
241 | } | 247 | } |
242 | 248 | ||
243 | static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) | 249 | static ssize_t show_hci_version(struct device *dev, |
250 | struct device_attribute *attr, char *buf) | ||
244 | { | 251 | { |
245 | struct hci_dev *hdev = to_hci_dev(dev); | 252 | struct hci_dev *hdev = to_hci_dev(dev); |
246 | return sprintf(buf, "%d\n", hdev->hci_ver); | 253 | return sprintf(buf, "%d\n", hdev->hci_ver); |
247 | } | 254 | } |
248 | 255 | ||
249 | static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) | 256 | static ssize_t show_hci_revision(struct device *dev, |
257 | struct device_attribute *attr, char *buf) | ||
250 | { | 258 | { |
251 | struct hci_dev *hdev = to_hci_dev(dev); | 259 | struct hci_dev *hdev = to_hci_dev(dev); |
252 | return sprintf(buf, "%d\n", hdev->hci_rev); | 260 | return sprintf(buf, "%d\n", hdev->hci_rev); |
253 | } | 261 | } |
254 | 262 | ||
255 | static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) | 263 | static ssize_t show_idle_timeout(struct device *dev, |
264 | struct device_attribute *attr, char *buf) | ||
256 | { | 265 | { |
257 | struct hci_dev *hdev = to_hci_dev(dev); | 266 | struct hci_dev *hdev = to_hci_dev(dev); |
258 | return sprintf(buf, "%d\n", hdev->idle_timeout); | 267 | return sprintf(buf, "%d\n", hdev->idle_timeout); |
259 | } | 268 | } |
260 | 269 | ||
261 | static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 270 | static ssize_t store_idle_timeout(struct device *dev, |
271 | struct device_attribute *attr, | ||
272 | const char *buf, size_t count) | ||
262 | { | 273 | { |
263 | struct hci_dev *hdev = to_hci_dev(dev); | 274 | struct hci_dev *hdev = to_hci_dev(dev); |
264 | unsigned int val; | 275 | unsigned int val; |
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a | |||
276 | return count; | 287 | return count; |
277 | } | 288 | } |
278 | 289 | ||
279 | static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) | 290 | static ssize_t show_sniff_max_interval(struct device *dev, |
291 | struct device_attribute *attr, char *buf) | ||
280 | { | 292 | { |
281 | struct hci_dev *hdev = to_hci_dev(dev); | 293 | struct hci_dev *hdev = to_hci_dev(dev); |
282 | return sprintf(buf, "%d\n", hdev->sniff_max_interval); | 294 | return sprintf(buf, "%d\n", hdev->sniff_max_interval); |
283 | } | 295 | } |
284 | 296 | ||
285 | static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 297 | static ssize_t store_sniff_max_interval(struct device *dev, |
298 | struct device_attribute *attr, | ||
299 | const char *buf, size_t count) | ||
286 | { | 300 | { |
287 | struct hci_dev *hdev = to_hci_dev(dev); | 301 | struct hci_dev *hdev = to_hci_dev(dev); |
288 | u16 val; | 302 | u16 val; |
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib | |||
300 | return count; | 314 | return count; |
301 | } | 315 | } |
302 | 316 | ||
303 | static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) | 317 | static ssize_t show_sniff_min_interval(struct device *dev, |
318 | struct device_attribute *attr, char *buf) | ||
304 | { | 319 | { |
305 | struct hci_dev *hdev = to_hci_dev(dev); | 320 | struct hci_dev *hdev = to_hci_dev(dev); |
306 | return sprintf(buf, "%d\n", hdev->sniff_min_interval); | 321 | return sprintf(buf, "%d\n", hdev->sniff_min_interval); |
307 | } | 322 | } |
308 | 323 | ||
309 | static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 324 | static ssize_t store_sniff_min_interval(struct device *dev, |
325 | struct device_attribute *attr, | ||
326 | const char *buf, size_t count) | ||
310 | { | 327 | { |
311 | struct hci_dev *hdev = to_hci_dev(dev); | 328 | struct hci_dev *hdev = to_hci_dev(dev); |
312 | u16 val; | 329 | u16 val; |
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); | |||
335 | static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); | 352 | static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); |
336 | 353 | ||
337 | static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, | 354 | static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, |
338 | show_idle_timeout, store_idle_timeout); | 355 | show_idle_timeout, store_idle_timeout); |
339 | static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, | 356 | static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, |
340 | show_sniff_max_interval, store_sniff_max_interval); | 357 | show_sniff_max_interval, store_sniff_max_interval); |
341 | static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, | 358 | static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, |
342 | show_sniff_min_interval, store_sniff_min_interval); | 359 | show_sniff_min_interval, store_sniff_min_interval); |
343 | 360 | ||
344 | static struct attribute *bt_host_attrs[] = { | 361 | static struct attribute *bt_host_attrs[] = { |
345 | &dev_attr_bus.attr, | 362 | &dev_attr_bus.attr, |
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) | |||
455 | memcpy(&data5, &uuid[14], 2); | 472 | memcpy(&data5, &uuid[14], 2); |
456 | 473 | ||
457 | seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", | 474 | seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", |
458 | ntohl(data0), ntohs(data1), ntohs(data2), | 475 | ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3), |
459 | ntohs(data3), ntohl(data4), ntohs(data5)); | 476 | ntohl(data4), ntohs(data5)); |
460 | } | 477 | } |
461 | 478 | ||
462 | static int uuids_show(struct seq_file *f, void *p) | 479 | static int uuids_show(struct seq_file *f, void *p) |
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val) | |||
513 | } | 530 | } |
514 | 531 | ||
515 | DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, | 532 | DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, |
516 | auto_accept_delay_set, "%llu\n"); | 533 | auto_accept_delay_set, "%llu\n"); |
517 | 534 | ||
518 | void hci_init_sysfs(struct hci_dev *hdev) | 535 | void hci_init_sysfs(struct hci_dev *hdev) |
519 | { | 536 | { |
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev) | |||
547 | return 0; | 564 | return 0; |
548 | 565 | ||
549 | debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, | 566 | debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, |
550 | hdev, &inquiry_cache_fops); | 567 | hdev, &inquiry_cache_fops); |
551 | 568 | ||
552 | debugfs_create_file("blacklist", 0444, hdev->debugfs, | 569 | debugfs_create_file("blacklist", 0444, hdev->debugfs, |
553 | hdev, &blacklist_fops); | 570 | hdev, &blacklist_fops); |
554 | 571 | ||
555 | debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); | 572 | debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); |
556 | 573 | ||
557 | debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, | 574 | debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, |
558 | &auto_accept_delay_fops); | 575 | &auto_accept_delay_fops); |
559 | return 0; | 576 | return 0; |
560 | } | 577 | } |
561 | 578 | ||
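The hci_sysfs.c hunks are formatting only: each sysfs show/store callback gets the same wrapped two-line prototype and the include list is trimmed. Every attribute in this file follows one pattern, sketched here with a hypothetical read-only attribute (the attribute name is illustrative; the signatures and helpers match the code above):

static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct hci_dev *hdev = to_hci_dev(dev);

	/* One value per attribute, newline terminated. */
	return sprintf(buf, "%d\n", hdev->manufacturer);
}

static DEVICE_ATTR(example, S_IRUGO, show_example, NULL);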
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 2c20d765b394..ccd985da6518 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -21,27 +21,8 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | |||
25 | #include <linux/types.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/poll.h> | ||
31 | #include <linux/freezer.h> | ||
32 | #include <linux/fcntl.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/socket.h> | ||
35 | #include <linux/ioctl.h> | ||
36 | #include <linux/file.h> | 24 | #include <linux/file.h> |
37 | #include <linux/init.h> | ||
38 | #include <linux/wait.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/kthread.h> | 25 | #include <linux/kthread.h> |
41 | #include <net/sock.h> | ||
42 | |||
43 | #include <linux/input.h> | ||
44 | #include <linux/hid.h> | ||
45 | #include <linux/hidraw.h> | 26 | #include <linux/hidraw.h> |
46 | 27 | ||
47 | #include <net/bluetooth/bluetooth.h> | 28 | #include <net/bluetooth/bluetooth.h> |
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) | |||
244 | } | 225 | } |
245 | 226 | ||
246 | static int __hidp_send_ctrl_message(struct hidp_session *session, | 227 | static int __hidp_send_ctrl_message(struct hidp_session *session, |
247 | unsigned char hdr, unsigned char *data, int size) | 228 | unsigned char hdr, unsigned char *data, |
229 | int size) | ||
248 | { | 230 | { |
249 | struct sk_buff *skb; | 231 | struct sk_buff *skb; |
250 | 232 | ||
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, | |||
268 | return 0; | 250 | return 0; |
269 | } | 251 | } |
270 | 252 | ||
271 | static inline int hidp_send_ctrl_message(struct hidp_session *session, | 253 | static int hidp_send_ctrl_message(struct hidp_session *session, |
272 | unsigned char hdr, unsigned char *data, int size) | 254 | unsigned char hdr, unsigned char *data, int size) |
273 | { | 255 | { |
274 | int err; | 256 | int err; |
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session) | |||
471 | mod_timer(&session->timer, jiffies + HZ * session->idle_to); | 453 | mod_timer(&session->timer, jiffies + HZ * session->idle_to); |
472 | } | 454 | } |
473 | 455 | ||
474 | static inline void hidp_del_timer(struct hidp_session *session) | 456 | static void hidp_del_timer(struct hidp_session *session) |
475 | { | 457 | { |
476 | if (session->idle_to > 0) | 458 | if (session->idle_to > 0) |
477 | del_timer(&session->timer); | 459 | del_timer(&session->timer); |
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 73a32d705c1f..18b3f6892a36 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
@@ -20,22 +20,8 @@ | |||
20 | SOFTWARE IS DISCLAIMED. | 20 | SOFTWARE IS DISCLAIMED. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/export.h> |
24 | |||
25 | #include <linux/types.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/poll.h> | ||
30 | #include <linux/fcntl.h> | ||
31 | #include <linux/skbuff.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/ioctl.h> | ||
34 | #include <linux/file.h> | 24 | #include <linux/file.h> |
35 | #include <linux/init.h> | ||
36 | #include <linux/compat.h> | ||
37 | #include <linux/gfp.h> | ||
38 | #include <net/sock.h> | ||
39 | 25 | ||
40 | #include "hidp.h" | 26 | #include "hidp.h" |
41 | 27 | ||
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 24f144b72a96..4ca88247b7c2 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -30,32 +30,14 @@ | |||
30 | 30 | ||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | 32 | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/capability.h> | ||
35 | #include <linux/errno.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/sched.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/poll.h> | ||
40 | #include <linux/fcntl.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/socket.h> | ||
44 | #include <linux/skbuff.h> | ||
45 | #include <linux/list.h> | ||
46 | #include <linux/device.h> | ||
47 | #include <linux/debugfs.h> | 33 | #include <linux/debugfs.h> |
48 | #include <linux/seq_file.h> | ||
49 | #include <linux/uaccess.h> | ||
50 | #include <linux/crc16.h> | 34 | #include <linux/crc16.h> |
51 | #include <net/sock.h> | ||
52 | |||
53 | #include <asm/unaligned.h> | ||
54 | 35 | ||
55 | #include <net/bluetooth/bluetooth.h> | 36 | #include <net/bluetooth/bluetooth.h> |
56 | #include <net/bluetooth/hci_core.h> | 37 | #include <net/bluetooth/hci_core.h> |
57 | #include <net/bluetooth/l2cap.h> | 38 | #include <net/bluetooth/l2cap.h> |
58 | #include <net/bluetooth/smp.h> | 39 | #include <net/bluetooth/smp.h> |
40 | #include <net/bluetooth/a2mp.h> | ||
59 | 41 | ||
60 | bool disable_ertm; | 42 | bool disable_ertm; |
61 | 43 | ||
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); | |||
73 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, | 55 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, |
74 | struct l2cap_chan *chan, int err); | 56 | struct l2cap_chan *chan, int err); |
75 | 57 | ||
58 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, | ||
59 | struct sk_buff_head *skbs, u8 event); | ||
60 | |||
76 | /* ---- L2CAP channels ---- */ | 61 | /* ---- L2CAP channels ---- */ |
77 | 62 | ||
78 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) | 63 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) |
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state) | |||
196 | state_to_string(state)); | 181 | state_to_string(state)); |
197 | 182 | ||
198 | chan->state = state; | 183 | chan->state = state; |
199 | chan->ops->state_change(chan->data, state); | 184 | chan->ops->state_change(chan, state); |
200 | } | 185 | } |
201 | 186 | ||
202 | static void l2cap_state_change(struct l2cap_chan *chan, int state) | 187 | static void l2cap_state_change(struct l2cap_chan *chan, int state) |
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) | |||
224 | release_sock(sk); | 209 | release_sock(sk); |
225 | } | 210 | } |
226 | 211 | ||
212 | static void __set_retrans_timer(struct l2cap_chan *chan) | ||
213 | { | ||
214 | if (!delayed_work_pending(&chan->monitor_timer) && | ||
215 | chan->retrans_timeout) { | ||
216 | l2cap_set_timer(chan, &chan->retrans_timer, | ||
217 | msecs_to_jiffies(chan->retrans_timeout)); | ||
218 | } | ||
219 | } | ||
220 | |||
221 | static void __set_monitor_timer(struct l2cap_chan *chan) | ||
222 | { | ||
223 | __clear_retrans_timer(chan); | ||
224 | if (chan->monitor_timeout) { | ||
225 | l2cap_set_timer(chan, &chan->monitor_timer, | ||
226 | msecs_to_jiffies(chan->monitor_timeout)); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, | ||
231 | u16 seq) | ||
232 | { | ||
233 | struct sk_buff *skb; | ||
234 | |||
235 | skb_queue_walk(head, skb) { | ||
236 | if (bt_cb(skb)->control.txseq == seq) | ||
237 | return skb; | ||
238 | } | ||
239 | |||
240 | return NULL; | ||
241 | } | ||
242 | |||
227 | /* ---- L2CAP sequence number lists ---- */ | 243 | /* ---- L2CAP sequence number lists ---- */ |
228 | 244 | ||
229 | /* For ERTM, ordered lists of sequence numbers must be tracked for | 245 | /* For ERTM, ordered lists of sequence numbers must be tracked for |
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work) | |||
366 | 382 | ||
367 | l2cap_chan_unlock(chan); | 383 | l2cap_chan_unlock(chan); |
368 | 384 | ||
369 | chan->ops->close(chan->data); | 385 | chan->ops->close(chan); |
370 | mutex_unlock(&conn->chan_lock); | 386 | mutex_unlock(&conn->chan_lock); |
371 | 387 | ||
372 | l2cap_chan_put(chan); | 388 | l2cap_chan_put(chan); |
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void) | |||
392 | 408 | ||
393 | atomic_set(&chan->refcnt, 1); | 409 | atomic_set(&chan->refcnt, 1); |
394 | 410 | ||
411 | /* This flag is cleared in l2cap_chan_ready() */ | ||
412 | set_bit(CONF_NOT_COMPLETE, &chan->conf_state); | ||
413 | |||
395 | BT_DBG("chan %p", chan); | 414 | BT_DBG("chan %p", chan); |
396 | 415 | ||
397 | return chan; | 416 | return chan; |
@@ -430,7 +449,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | |||
430 | case L2CAP_CHAN_CONN_ORIENTED: | 449 | case L2CAP_CHAN_CONN_ORIENTED: |
431 | if (conn->hcon->type == LE_LINK) { | 450 | if (conn->hcon->type == LE_LINK) { |
432 | /* LE connection */ | 451 | /* LE connection */ |
433 | chan->omtu = L2CAP_LE_DEFAULT_MTU; | 452 | chan->omtu = L2CAP_DEFAULT_MTU; |
434 | chan->scid = L2CAP_CID_LE_DATA; | 453 | chan->scid = L2CAP_CID_LE_DATA; |
435 | chan->dcid = L2CAP_CID_LE_DATA; | 454 | chan->dcid = L2CAP_CID_LE_DATA; |
436 | } else { | 455 | } else { |
@@ -447,6 +466,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | |||
447 | chan->omtu = L2CAP_DEFAULT_MTU; | 466 | chan->omtu = L2CAP_DEFAULT_MTU; |
448 | break; | 467 | break; |
449 | 468 | ||
469 | case L2CAP_CHAN_CONN_FIX_A2MP: | ||
470 | chan->scid = L2CAP_CID_A2MP; | ||
471 | chan->dcid = L2CAP_CID_A2MP; | ||
472 | chan->omtu = L2CAP_A2MP_DEFAULT_MTU; | ||
473 | chan->imtu = L2CAP_A2MP_DEFAULT_MTU; | ||
474 | break; | ||
475 | |||
450 | default: | 476 | default: |
451 | /* Raw socket can send/recv signalling messages only */ | 477 | /* Raw socket can send/recv signalling messages only */ |
452 | chan->scid = L2CAP_CID_SIGNALING; | 478 | chan->scid = L2CAP_CID_SIGNALING; |
@@ -466,18 +492,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | |||
466 | list_add(&chan->list, &conn->chan_l); | 492 | list_add(&chan->list, &conn->chan_l); |
467 | } | 493 | } |
468 | 494 | ||
469 | static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | 495 | void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) |
470 | { | 496 | { |
471 | mutex_lock(&conn->chan_lock); | 497 | mutex_lock(&conn->chan_lock); |
472 | __l2cap_chan_add(conn, chan); | 498 | __l2cap_chan_add(conn, chan); |
473 | mutex_unlock(&conn->chan_lock); | 499 | mutex_unlock(&conn->chan_lock); |
474 | } | 500 | } |
475 | 501 | ||
476 | static void l2cap_chan_del(struct l2cap_chan *chan, int err) | 502 | void l2cap_chan_del(struct l2cap_chan *chan, int err) |
477 | { | 503 | { |
478 | struct sock *sk = chan->sk; | ||
479 | struct l2cap_conn *conn = chan->conn; | 504 | struct l2cap_conn *conn = chan->conn; |
480 | struct sock *parent = bt_sk(sk)->parent; | ||
481 | 505 | ||
482 | __clear_chan_timer(chan); | 506 | __clear_chan_timer(chan); |
483 | 507 | ||
@@ -490,34 +514,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err) | |||
490 | l2cap_chan_put(chan); | 514 | l2cap_chan_put(chan); |
491 | 515 | ||
492 | chan->conn = NULL; | 516 | chan->conn = NULL; |
493 | hci_conn_put(conn->hcon); | ||
494 | } | ||
495 | |||
496 | lock_sock(sk); | ||
497 | |||
498 | __l2cap_state_change(chan, BT_CLOSED); | ||
499 | sock_set_flag(sk, SOCK_ZAPPED); | ||
500 | 517 | ||
501 | if (err) | 518 | if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) |
502 | __l2cap_chan_set_err(chan, err); | 519 | hci_conn_put(conn->hcon); |
520 | } | ||
503 | 521 | ||
504 | if (parent) { | 522 | if (chan->ops->teardown) |
505 | bt_accept_unlink(sk); | 523 | chan->ops->teardown(chan, err); |
506 | parent->sk_data_ready(parent, 0); | ||
507 | } else | ||
508 | sk->sk_state_change(sk); | ||
509 | 524 | ||
510 | release_sock(sk); | 525 | if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) |
511 | |||
512 | if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && | ||
513 | test_bit(CONF_INPUT_DONE, &chan->conf_state))) | ||
514 | return; | 526 | return; |
515 | 527 | ||
516 | skb_queue_purge(&chan->tx_q); | 528 | switch (chan->mode) { |
517 | 529 | case L2CAP_MODE_BASIC: | |
518 | if (chan->mode == L2CAP_MODE_ERTM) { | 530 | break; |
519 | struct srej_list *l, *tmp; | ||
520 | 531 | ||
532 | case L2CAP_MODE_ERTM: | ||
521 | __clear_retrans_timer(chan); | 533 | __clear_retrans_timer(chan); |
522 | __clear_monitor_timer(chan); | 534 | __clear_monitor_timer(chan); |
523 | __clear_ack_timer(chan); | 535 | __clear_ack_timer(chan); |
@@ -526,30 +538,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err) | |||
526 | 538 | ||
527 | l2cap_seq_list_free(&chan->srej_list); | 539 | l2cap_seq_list_free(&chan->srej_list); |
528 | l2cap_seq_list_free(&chan->retrans_list); | 540 | l2cap_seq_list_free(&chan->retrans_list); |
529 | list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { | ||
530 | list_del(&l->list); | ||
531 | kfree(l); | ||
532 | } | ||
533 | } | ||
534 | } | ||
535 | |||
536 | static void l2cap_chan_cleanup_listen(struct sock *parent) | ||
537 | { | ||
538 | struct sock *sk; | ||
539 | |||
540 | BT_DBG("parent %p", parent); | ||
541 | 541 | ||
542 | /* Close not yet accepted channels */ | 542 | /* fall through */ |
543 | while ((sk = bt_accept_dequeue(parent, NULL))) { | ||
544 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | ||
545 | |||
546 | l2cap_chan_lock(chan); | ||
547 | __clear_chan_timer(chan); | ||
548 | l2cap_chan_close(chan, ECONNRESET); | ||
549 | l2cap_chan_unlock(chan); | ||
550 | 543 | ||
551 | chan->ops->close(chan->data); | 544 | case L2CAP_MODE_STREAMING: |
545 | skb_queue_purge(&chan->tx_q); | ||
546 | break; | ||
552 | } | 547 | } |
548 | |||
549 | return; | ||
553 | } | 550 | } |
554 | 551 | ||
555 | void l2cap_chan_close(struct l2cap_chan *chan, int reason) | 552 | void l2cap_chan_close(struct l2cap_chan *chan, int reason) |
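A note on the l2cap_chan_del() rewrite above: instead of locking and flagging the socket directly, the core now defers cleanup to the channel's ops callbacks (teardown here, close/ready/new_connection in later hunks), which is what allows a non-socket user such as the A2MP fixed channel to supply its own handlers. A rough, hypothetical sketch of that shape follows; the struct and field names are illustrative, not the kernel's actual struct l2cap_ops:

/* Simplified mirror of the callback-driven design; purely illustrative. */
struct demo_chan;

struct demo_chan_ops {
	/* backend-specific cleanup; err carries the reason for teardown */
	void (*teardown)(struct demo_chan *chan, int err);
	/* channel reached the connected state */
	void (*ready)(struct demo_chan *chan);
	/* channel should be fully closed and released */
	void (*close)(struct demo_chan *chan);
};

struct demo_chan {
	const struct demo_chan_ops *ops;
	void *backend;		/* e.g. a socket, or A2MP manager state */
};

/* Core code no longer touches the backend directly: */
static void demo_chan_del(struct demo_chan *chan, int err)
{
	if (chan->ops->teardown)
		chan->ops->teardown(chan, err);
}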
@@ -562,12 +559,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
562 | 559 | ||
563 | switch (chan->state) { | 560 | switch (chan->state) { |
564 | case BT_LISTEN: | 561 | case BT_LISTEN: |
565 | lock_sock(sk); | 562 | if (chan->ops->teardown) |
566 | l2cap_chan_cleanup_listen(sk); | 563 | chan->ops->teardown(chan, 0); |
567 | |||
568 | __l2cap_state_change(chan, BT_CLOSED); | ||
569 | sock_set_flag(sk, SOCK_ZAPPED); | ||
570 | release_sock(sk); | ||
571 | break; | 564 | break; |
572 | 565 | ||
573 | case BT_CONNECTED: | 566 | case BT_CONNECTED: |
@@ -595,7 +588,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
595 | rsp.scid = cpu_to_le16(chan->dcid); | 588 | rsp.scid = cpu_to_le16(chan->dcid); |
596 | rsp.dcid = cpu_to_le16(chan->scid); | 589 | rsp.dcid = cpu_to_le16(chan->scid); |
597 | rsp.result = cpu_to_le16(result); | 590 | rsp.result = cpu_to_le16(result); |
598 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); | 591 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
599 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 592 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
600 | sizeof(rsp), &rsp); | 593 | sizeof(rsp), &rsp); |
601 | } | 594 | } |
@@ -609,9 +602,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
609 | break; | 602 | break; |
610 | 603 | ||
611 | default: | 604 | default: |
612 | lock_sock(sk); | 605 | if (chan->ops->teardown) |
613 | sock_set_flag(sk, SOCK_ZAPPED); | 606 | chan->ops->teardown(chan, 0); |
614 | release_sock(sk); | ||
615 | break; | 607 | break; |
616 | } | 608 | } |
617 | } | 609 | } |
@@ -627,7 +619,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) | |||
627 | default: | 619 | default: |
628 | return HCI_AT_NO_BONDING; | 620 | return HCI_AT_NO_BONDING; |
629 | } | 621 | } |
630 | } else if (chan->psm == cpu_to_le16(0x0001)) { | 622 | } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) { |
631 | if (chan->sec_level == BT_SECURITY_LOW) | 623 | if (chan->sec_level == BT_SECURITY_LOW) |
632 | chan->sec_level = BT_SECURITY_SDP; | 624 | chan->sec_level = BT_SECURITY_SDP; |
633 | 625 | ||
@@ -773,9 +765,11 @@ static inline void __unpack_control(struct l2cap_chan *chan, | |||
773 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { | 765 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { |
774 | __unpack_extended_control(get_unaligned_le32(skb->data), | 766 | __unpack_extended_control(get_unaligned_le32(skb->data), |
775 | &bt_cb(skb)->control); | 767 | &bt_cb(skb)->control); |
768 | skb_pull(skb, L2CAP_EXT_CTRL_SIZE); | ||
776 | } else { | 769 | } else { |
777 | __unpack_enhanced_control(get_unaligned_le16(skb->data), | 770 | __unpack_enhanced_control(get_unaligned_le16(skb->data), |
778 | &bt_cb(skb)->control); | 771 | &bt_cb(skb)->control); |
772 | skb_pull(skb, L2CAP_ENH_CTRL_SIZE); | ||
779 | } | 773 | } |
780 | } | 774 | } |
781 | 775 | ||
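A note on the __unpack_control() hunk above: the added skb_pull() calls consume the 2-byte enhanced or 4-byte extended control field as soon as it is parsed, so later FCS and reassembly code can work from skb->data and skb->len without re-adding control-size offsets. A minimal sketch of the parse order this assumes (not the patch's code; 2 and 4 are the usual L2CAP control field widths):

#include <linux/types.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

static u32 demo_read_control(struct sk_buff *skb, bool ext_ctrl)
{
	u32 control;

	if (ext_ctrl) {
		control = get_unaligned_le32(skb->data);
		skb_pull(skb, 4);	/* L2CAP_EXT_CTRL_SIZE */
	} else {
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);	/* L2CAP_ENH_CTRL_SIZE */
	}

	/* skb->data now points at the SDU length (if any), payload and FCS */
	return control;
}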
@@ -830,66 +824,102 @@ static inline void __pack_control(struct l2cap_chan *chan, | |||
830 | } | 824 | } |
831 | } | 825 | } |
832 | 826 | ||
833 | static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) | 827 | static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) |
834 | { | 828 | { |
835 | struct sk_buff *skb; | ||
836 | struct l2cap_hdr *lh; | ||
837 | struct l2cap_conn *conn = chan->conn; | ||
838 | int count, hlen; | ||
839 | |||
840 | if (chan->state != BT_CONNECTED) | ||
841 | return; | ||
842 | |||
843 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | 829 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
844 | hlen = L2CAP_EXT_HDR_SIZE; | 830 | return L2CAP_EXT_HDR_SIZE; |
845 | else | 831 | else |
846 | hlen = L2CAP_ENH_HDR_SIZE; | 832 | return L2CAP_ENH_HDR_SIZE; |
833 | } | ||
834 | |||
835 | static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, | ||
836 | u32 control) | ||
837 | { | ||
838 | struct sk_buff *skb; | ||
839 | struct l2cap_hdr *lh; | ||
840 | int hlen = __ertm_hdr_size(chan); | ||
847 | 841 | ||
848 | if (chan->fcs == L2CAP_FCS_CRC16) | 842 | if (chan->fcs == L2CAP_FCS_CRC16) |
849 | hlen += L2CAP_FCS_SIZE; | 843 | hlen += L2CAP_FCS_SIZE; |
850 | 844 | ||
851 | BT_DBG("chan %p, control 0x%8.8x", chan, control); | 845 | skb = bt_skb_alloc(hlen, GFP_KERNEL); |
852 | |||
853 | count = min_t(unsigned int, conn->mtu, hlen); | ||
854 | |||
855 | control |= __set_sframe(chan); | ||
856 | 846 | ||
857 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | ||
858 | control |= __set_ctrl_final(chan); | ||
859 | |||
860 | if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) | ||
861 | control |= __set_ctrl_poll(chan); | ||
862 | |||
863 | skb = bt_skb_alloc(count, GFP_ATOMIC); | ||
864 | if (!skb) | 847 | if (!skb) |
865 | return; | 848 | return ERR_PTR(-ENOMEM); |
866 | 849 | ||
867 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 850 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
868 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); | 851 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); |
869 | lh->cid = cpu_to_le16(chan->dcid); | 852 | lh->cid = cpu_to_le16(chan->dcid); |
870 | 853 | ||
871 | __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); | 854 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
855 | put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); | ||
856 | else | ||
857 | put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); | ||
872 | 858 | ||
873 | if (chan->fcs == L2CAP_FCS_CRC16) { | 859 | if (chan->fcs == L2CAP_FCS_CRC16) { |
874 | u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); | 860 | u16 fcs = crc16(0, (u8 *)skb->data, skb->len); |
875 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); | 861 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); |
876 | } | 862 | } |
877 | 863 | ||
878 | skb->priority = HCI_PRIO_MAX; | 864 | skb->priority = HCI_PRIO_MAX; |
879 | l2cap_do_send(chan, skb); | 865 | return skb; |
880 | } | 866 | } |
881 | 867 | ||
882 | static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) | 868 | static void l2cap_send_sframe(struct l2cap_chan *chan, |
869 | struct l2cap_ctrl *control) | ||
883 | { | 870 | { |
884 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 871 | struct sk_buff *skb; |
885 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); | 872 | u32 control_field; |
873 | |||
874 | BT_DBG("chan %p, control %p", chan, control); | ||
875 | |||
876 | if (!control->sframe) | ||
877 | return; | ||
878 | |||
879 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && | ||
880 | !control->poll) | ||
881 | control->final = 1; | ||
882 | |||
883 | if (control->super == L2CAP_SUPER_RR) | ||
884 | clear_bit(CONN_RNR_SENT, &chan->conn_state); | ||
885 | else if (control->super == L2CAP_SUPER_RNR) | ||
886 | set_bit(CONN_RNR_SENT, &chan->conn_state); | 886 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
887 | } else | ||
888 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); | ||
889 | 887 | ||
890 | control |= __set_reqseq(chan, chan->buffer_seq); | 888 | if (control->super != L2CAP_SUPER_SREJ) { |
889 | chan->last_acked_seq = control->reqseq; | ||
890 | __clear_ack_timer(chan); | ||
891 | } | ||
892 | |||
893 | BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, | ||
894 | control->final, control->poll, control->super); | ||
895 | |||
896 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
897 | control_field = __pack_extended_control(control); | ||
898 | else | ||
899 | control_field = __pack_enhanced_control(control); | ||
900 | |||
901 | skb = l2cap_create_sframe_pdu(chan, control_field); | ||
902 | if (!IS_ERR(skb)) | ||
903 | l2cap_do_send(chan, skb); | ||
904 | } | ||
905 | |||
906 | static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) | ||
907 | { | ||
908 | struct l2cap_ctrl control; | ||
909 | |||
910 | BT_DBG("chan %p, poll %d", chan, poll); | ||
911 | |||
912 | memset(&control, 0, sizeof(control)); | ||
913 | control.sframe = 1; | ||
914 | control.poll = poll; | ||
891 | 915 | ||
892 | l2cap_send_sframe(chan, control); | 916 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) |
917 | control.super = L2CAP_SUPER_RNR; | ||
918 | else | ||
919 | control.super = L2CAP_SUPER_RR; | ||
920 | |||
921 | control.reqseq = chan->buffer_seq; | ||
922 | l2cap_send_sframe(chan, &control); | ||
893 | } | 923 | } |
894 | 924 | ||
895 | static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) | 925 | static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) |
@@ -914,25 +944,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan) | |||
914 | 944 | ||
915 | static void l2cap_chan_ready(struct l2cap_chan *chan) | 945 | static void l2cap_chan_ready(struct l2cap_chan *chan) |
916 | { | 946 | { |
917 | struct sock *sk = chan->sk; | 947 | /* This clears all conf flags, including CONF_NOT_COMPLETE */ |
918 | struct sock *parent; | ||
919 | |||
920 | lock_sock(sk); | ||
921 | |||
922 | parent = bt_sk(sk)->parent; | ||
923 | |||
924 | BT_DBG("sk %p, parent %p", sk, parent); | ||
925 | |||
926 | chan->conf_state = 0; | 948 | chan->conf_state = 0; |
927 | __clear_chan_timer(chan); | 949 | __clear_chan_timer(chan); |
928 | 950 | ||
929 | __l2cap_state_change(chan, BT_CONNECTED); | 951 | chan->state = BT_CONNECTED; |
930 | sk->sk_state_change(sk); | ||
931 | |||
932 | if (parent) | ||
933 | parent->sk_data_ready(parent, 0); | ||
934 | 952 | ||
935 | release_sock(sk); | 953 | chan->ops->ready(chan); |
936 | } | 954 | } |
937 | 955 | ||
938 | static void l2cap_do_start(struct l2cap_chan *chan) | 956 | static void l2cap_do_start(struct l2cap_chan *chan) |
@@ -953,7 +971,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) | |||
953 | l2cap_send_conn_req(chan); | 971 | l2cap_send_conn_req(chan); |
954 | } else { | 972 | } else { |
955 | struct l2cap_info_req req; | 973 | struct l2cap_info_req req; |
956 | req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | 974 | req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); |
957 | 975 | ||
958 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | 976 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
959 | conn->info_ident = l2cap_get_ident(conn); | 977 | conn->info_ident = l2cap_get_ident(conn); |
@@ -995,6 +1013,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c | |||
995 | __clear_ack_timer(chan); | 1013 | __clear_ack_timer(chan); |
996 | } | 1014 | } |
997 | 1015 | ||
1016 | if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { | ||
1017 | __l2cap_state_change(chan, BT_DISCONN); | ||
1018 | return; | ||
1019 | } | ||
1020 | |||
998 | req.dcid = cpu_to_le16(chan->dcid); | 1021 | req.dcid = cpu_to_le16(chan->dcid); |
999 | req.scid = cpu_to_le16(chan->scid); | 1022 | req.scid = cpu_to_le16(chan->scid); |
1000 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | 1023 | l2cap_send_cmd(conn, l2cap_get_ident(conn), |
@@ -1053,20 +1076,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
1053 | if (test_bit(BT_SK_DEFER_SETUP, | 1076 | if (test_bit(BT_SK_DEFER_SETUP, |
1054 | &bt_sk(sk)->flags)) { | 1077 | &bt_sk(sk)->flags)) { |
1055 | struct sock *parent = bt_sk(sk)->parent; | 1078 | struct sock *parent = bt_sk(sk)->parent; |
1056 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); | 1079 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); |
1057 | rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); | 1080 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); |
1058 | if (parent) | 1081 | if (parent) |
1059 | parent->sk_data_ready(parent, 0); | 1082 | parent->sk_data_ready(parent, 0); |
1060 | 1083 | ||
1061 | } else { | 1084 | } else { |
1062 | __l2cap_state_change(chan, BT_CONFIG); | 1085 | __l2cap_state_change(chan, BT_CONFIG); |
1063 | rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); | 1086 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); |
1064 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); | 1087 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
1065 | } | 1088 | } |
1066 | release_sock(sk); | 1089 | release_sock(sk); |
1067 | } else { | 1090 | } else { |
1068 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); | 1091 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); |
1069 | rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); | 1092 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); |
1070 | } | 1093 | } |
1071 | 1094 | ||
1072 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 1095 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
@@ -1150,13 +1173,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
1150 | 1173 | ||
1151 | lock_sock(parent); | 1174 | lock_sock(parent); |
1152 | 1175 | ||
1153 | /* Check for backlog size */ | 1176 | chan = pchan->ops->new_connection(pchan); |
1154 | if (sk_acceptq_is_full(parent)) { | ||
1155 | BT_DBG("backlog full %d", parent->sk_ack_backlog); | ||
1156 | goto clean; | ||
1157 | } | ||
1158 | |||
1159 | chan = pchan->ops->new_connection(pchan->data); | ||
1160 | if (!chan) | 1177 | if (!chan) |
1161 | goto clean; | 1178 | goto clean; |
1162 | 1179 | ||
@@ -1171,10 +1188,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
1171 | 1188 | ||
1172 | l2cap_chan_add(conn, chan); | 1189 | l2cap_chan_add(conn, chan); |
1173 | 1190 | ||
1174 | __set_chan_timer(chan, sk->sk_sndtimeo); | 1191 | l2cap_chan_ready(chan); |
1175 | |||
1176 | __l2cap_state_change(chan, BT_CONNECTED); | ||
1177 | parent->sk_data_ready(parent, 0); | ||
1178 | 1192 | ||
1179 | clean: | 1193 | clean: |
1180 | release_sock(parent); | 1194 | release_sock(parent); |
@@ -1198,6 +1212,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) | |||
1198 | 1212 | ||
1199 | l2cap_chan_lock(chan); | 1213 | l2cap_chan_lock(chan); |
1200 | 1214 | ||
1215 | if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { | ||
1216 | l2cap_chan_unlock(chan); | ||
1217 | continue; | ||
1218 | } | ||
1219 | |||
1201 | if (conn->hcon->type == LE_LINK) { | 1220 | if (conn->hcon->type == LE_LINK) { |
1202 | if (smp_conn_security(conn, chan->sec_level)) | 1221 | if (smp_conn_security(conn, chan->sec_level)) |
1203 | l2cap_chan_ready(chan); | 1222 | l2cap_chan_ready(chan); |
@@ -1270,7 +1289,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) | |||
1270 | 1289 | ||
1271 | l2cap_chan_unlock(chan); | 1290 | l2cap_chan_unlock(chan); |
1272 | 1291 | ||
1273 | chan->ops->close(chan->data); | 1292 | chan->ops->close(chan); |
1274 | l2cap_chan_put(chan); | 1293 | l2cap_chan_put(chan); |
1275 | } | 1294 | } |
1276 | 1295 | ||
@@ -1295,7 +1314,12 @@ static void security_timeout(struct work_struct *work) | |||
1295 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, | 1314 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
1296 | security_timer.work); | 1315 | security_timer.work); |
1297 | 1316 | ||
1298 | l2cap_conn_del(conn->hcon, ETIMEDOUT); | 1317 | BT_DBG("conn %p", conn); |
1318 | |||
1319 | if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { | ||
1320 | smp_chan_destroy(conn); | ||
1321 | l2cap_conn_del(conn->hcon, ETIMEDOUT); | ||
1322 | } | ||
1299 | } | 1323 | } |
1300 | 1324 | ||
1301 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | 1325 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) |
@@ -1439,21 +1463,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, | |||
1439 | goto done; | 1463 | goto done; |
1440 | } | 1464 | } |
1441 | 1465 | ||
1442 | lock_sock(sk); | 1466 | switch (chan->state) { |
1443 | |||
1444 | switch (sk->sk_state) { | ||
1445 | case BT_CONNECT: | 1467 | case BT_CONNECT: |
1446 | case BT_CONNECT2: | 1468 | case BT_CONNECT2: |
1447 | case BT_CONFIG: | 1469 | case BT_CONFIG: |
1448 | /* Already connecting */ | 1470 | /* Already connecting */ |
1449 | err = 0; | 1471 | err = 0; |
1450 | release_sock(sk); | ||
1451 | goto done; | 1472 | goto done; |
1452 | 1473 | ||
1453 | case BT_CONNECTED: | 1474 | case BT_CONNECTED: |
1454 | /* Already connected */ | 1475 | /* Already connected */ |
1455 | err = -EISCONN; | 1476 | err = -EISCONN; |
1456 | release_sock(sk); | ||
1457 | goto done; | 1477 | goto done; |
1458 | 1478 | ||
1459 | case BT_OPEN: | 1479 | case BT_OPEN: |
@@ -1463,13 +1483,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, | |||
1463 | 1483 | ||
1464 | default: | 1484 | default: |
1465 | err = -EBADFD; | 1485 | err = -EBADFD; |
1466 | release_sock(sk); | ||
1467 | goto done; | 1486 | goto done; |
1468 | } | 1487 | } |
1469 | 1488 | ||
1470 | /* Set destination address and psm */ | 1489 | /* Set destination address and psm */ |
1490 | lock_sock(sk); | ||
1471 | bacpy(&bt_sk(sk)->dst, dst); | 1491 | bacpy(&bt_sk(sk)->dst, dst); |
1472 | |||
1473 | release_sock(sk); | 1492 | release_sock(sk); |
1474 | 1493 | ||
1475 | chan->psm = psm; | 1494 | chan->psm = psm; |
@@ -1571,23 +1590,20 @@ int __l2cap_wait_ack(struct sock *sk) | |||
1571 | static void l2cap_monitor_timeout(struct work_struct *work) | 1590 | static void l2cap_monitor_timeout(struct work_struct *work) |
1572 | { | 1591 | { |
1573 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, | 1592 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1574 | monitor_timer.work); | 1593 | monitor_timer.work); |
1575 | 1594 | ||
1576 | BT_DBG("chan %p", chan); | 1595 | BT_DBG("chan %p", chan); |
1577 | 1596 | ||
1578 | l2cap_chan_lock(chan); | 1597 | l2cap_chan_lock(chan); |
1579 | 1598 | ||
1580 | if (chan->retry_count >= chan->remote_max_tx) { | 1599 | if (!chan->conn) { |
1581 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); | ||
1582 | l2cap_chan_unlock(chan); | 1600 | l2cap_chan_unlock(chan); |
1583 | l2cap_chan_put(chan); | 1601 | l2cap_chan_put(chan); |
1584 | return; | 1602 | return; |
1585 | } | 1603 | } |
1586 | 1604 | ||
1587 | chan->retry_count++; | 1605 | l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); |
1588 | __set_monitor_timer(chan); | ||
1589 | 1606 | ||
1590 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); | ||
1591 | l2cap_chan_unlock(chan); | 1607 | l2cap_chan_unlock(chan); |
1592 | l2cap_chan_put(chan); | 1608 | l2cap_chan_put(chan); |
1593 | } | 1609 | } |
@@ -1595,234 +1611,293 @@ static void l2cap_monitor_timeout(struct work_struct *work) | |||
1595 | static void l2cap_retrans_timeout(struct work_struct *work) | 1611 | static void l2cap_retrans_timeout(struct work_struct *work) |
1596 | { | 1612 | { |
1597 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, | 1613 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1598 | retrans_timer.work); | 1614 | retrans_timer.work); |
1599 | 1615 | ||
1600 | BT_DBG("chan %p", chan); | 1616 | BT_DBG("chan %p", chan); |
1601 | 1617 | ||
1602 | l2cap_chan_lock(chan); | 1618 | l2cap_chan_lock(chan); |
1603 | 1619 | ||
1604 | chan->retry_count = 1; | 1620 | if (!chan->conn) { |
1605 | __set_monitor_timer(chan); | 1621 | l2cap_chan_unlock(chan); |
1606 | 1622 | l2cap_chan_put(chan); | |
1607 | set_bit(CONN_WAIT_F, &chan->conn_state); | 1623 | return; |
1608 | 1624 | } | |
1609 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); | ||
1610 | 1625 | ||
1626 | l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); | ||
1611 | l2cap_chan_unlock(chan); | 1627 | l2cap_chan_unlock(chan); |
1612 | l2cap_chan_put(chan); | 1628 | l2cap_chan_put(chan); |
1613 | } | 1629 | } |
1614 | 1630 | ||
1615 | static void l2cap_drop_acked_frames(struct l2cap_chan *chan) | 1631 | static void l2cap_streaming_send(struct l2cap_chan *chan, |
1632 | struct sk_buff_head *skbs) | ||
1616 | { | 1633 | { |
1617 | struct sk_buff *skb; | 1634 | struct sk_buff *skb; |
1635 | struct l2cap_ctrl *control; | ||
1618 | 1636 | ||
1619 | while ((skb = skb_peek(&chan->tx_q)) && | 1637 | BT_DBG("chan %p, skbs %p", chan, skbs); |
1620 | chan->unacked_frames) { | ||
1621 | if (bt_cb(skb)->control.txseq == chan->expected_ack_seq) | ||
1622 | break; | ||
1623 | 1638 | ||
1624 | skb = skb_dequeue(&chan->tx_q); | 1639 | skb_queue_splice_tail_init(skbs, &chan->tx_q); |
1625 | kfree_skb(skb); | ||
1626 | 1640 | ||
1627 | chan->unacked_frames--; | 1641 | while (!skb_queue_empty(&chan->tx_q)) { |
1628 | } | ||
1629 | 1642 | ||
1630 | if (!chan->unacked_frames) | 1643 | skb = skb_dequeue(&chan->tx_q); |
1631 | __clear_retrans_timer(chan); | ||
1632 | } | ||
1633 | 1644 | ||
1634 | static void l2cap_streaming_send(struct l2cap_chan *chan) | 1645 | bt_cb(skb)->control.retries = 1; |
1635 | { | 1646 | control = &bt_cb(skb)->control; |
1636 | struct sk_buff *skb; | 1647 | |
1637 | u32 control; | 1648 | control->reqseq = 0; |
1638 | u16 fcs; | 1649 | control->txseq = chan->next_tx_seq; |
1639 | 1650 | ||
1640 | while ((skb = skb_dequeue(&chan->tx_q))) { | 1651 | __pack_control(chan, control, skb); |
1641 | control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); | ||
1642 | control |= __set_txseq(chan, chan->next_tx_seq); | ||
1643 | control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); | ||
1644 | __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); | ||
1645 | 1652 | ||
1646 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1653 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1647 | fcs = crc16(0, (u8 *)skb->data, | 1654 | u16 fcs = crc16(0, (u8 *) skb->data, skb->len); |
1648 | skb->len - L2CAP_FCS_SIZE); | 1655 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); |
1649 | put_unaligned_le16(fcs, | ||
1650 | skb->data + skb->len - L2CAP_FCS_SIZE); | ||
1651 | } | 1656 | } |
1652 | 1657 | ||
1653 | l2cap_do_send(chan, skb); | 1658 | l2cap_do_send(chan, skb); |
1654 | 1659 | ||
1660 | BT_DBG("Sent txseq %d", (int)control->txseq); | ||
1661 | |||
1655 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); | 1662 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); |
1663 | chan->frames_sent++; | ||
1656 | } | 1664 | } |
1657 | } | 1665 | } |
1658 | 1666 | ||
1659 | static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) | 1667 | static int l2cap_ertm_send(struct l2cap_chan *chan) |
1660 | { | 1668 | { |
1661 | struct sk_buff *skb, *tx_skb; | 1669 | struct sk_buff *skb, *tx_skb; |
1662 | u16 fcs; | 1670 | struct l2cap_ctrl *control; |
1663 | u32 control; | 1671 | int sent = 0; |
1664 | 1672 | ||
1665 | skb = skb_peek(&chan->tx_q); | 1673 | BT_DBG("chan %p", chan); |
1666 | if (!skb) | ||
1667 | return; | ||
1668 | 1674 | ||
1669 | while (bt_cb(skb)->control.txseq != tx_seq) { | 1675 | if (chan->state != BT_CONNECTED) |
1670 | if (skb_queue_is_last(&chan->tx_q, skb)) | 1676 | return -ENOTCONN; |
1671 | return; | ||
1672 | 1677 | ||
1673 | skb = skb_queue_next(&chan->tx_q, skb); | 1678 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
1674 | } | 1679 | return 0; |
1675 | 1680 | ||
1676 | if (bt_cb(skb)->control.retries == chan->remote_max_tx && | 1681 | while (chan->tx_send_head && |
1677 | chan->remote_max_tx) { | 1682 | chan->unacked_frames < chan->remote_tx_win && |
1678 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); | 1683 | chan->tx_state == L2CAP_TX_STATE_XMIT) { |
1679 | return; | ||
1680 | } | ||
1681 | 1684 | ||
1682 | tx_skb = skb_clone(skb, GFP_ATOMIC); | 1685 | skb = chan->tx_send_head; |
1683 | bt_cb(skb)->control.retries++; | ||
1684 | 1686 | ||
1685 | control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); | 1687 | bt_cb(skb)->control.retries = 1; |
1686 | control &= __get_sar_mask(chan); | 1688 | control = &bt_cb(skb)->control; |
1687 | 1689 | ||
1688 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 1690 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
1689 | control |= __set_ctrl_final(chan); | 1691 | control->final = 1; |
1690 | 1692 | ||
1691 | control |= __set_reqseq(chan, chan->buffer_seq); | 1693 | control->reqseq = chan->buffer_seq; |
1692 | control |= __set_txseq(chan, tx_seq); | 1694 | chan->last_acked_seq = chan->buffer_seq; |
1695 | control->txseq = chan->next_tx_seq; | ||
1693 | 1696 | ||
1694 | __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); | 1697 | __pack_control(chan, control, skb); |
1695 | 1698 | ||
1696 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1699 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1697 | fcs = crc16(0, (u8 *)tx_skb->data, | 1700 | u16 fcs = crc16(0, (u8 *) skb->data, skb->len); |
1698 | tx_skb->len - L2CAP_FCS_SIZE); | 1701 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); |
1699 | put_unaligned_le16(fcs, | 1702 | } |
1700 | tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); | 1703 | |
1704 | /* Clone after data has been modified. Data is assumed to be | ||
1705 | read-only (for locking purposes) on cloned sk_buffs. | ||
1706 | */ | ||
1707 | tx_skb = skb_clone(skb, GFP_KERNEL); | ||
1708 | |||
1709 | if (!tx_skb) | ||
1710 | break; | ||
1711 | |||
1712 | __set_retrans_timer(chan); | ||
1713 | |||
1714 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); | ||
1715 | chan->unacked_frames++; | ||
1716 | chan->frames_sent++; | ||
1717 | sent++; | ||
1718 | |||
1719 | if (skb_queue_is_last(&chan->tx_q, skb)) | ||
1720 | chan->tx_send_head = NULL; | ||
1721 | else | ||
1722 | chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); | ||
1723 | |||
1724 | l2cap_do_send(chan, tx_skb); | ||
1725 | BT_DBG("Sent txseq %d", (int)control->txseq); | ||
1701 | } | 1726 | } |
1702 | 1727 | ||
1703 | l2cap_do_send(chan, tx_skb); | 1728 | BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent, |
1729 | (int) chan->unacked_frames, skb_queue_len(&chan->tx_q)); | ||
1730 | |||
1731 | return sent; | ||
1704 | } | 1732 | } |
1705 | 1733 | ||
1706 | static int l2cap_ertm_send(struct l2cap_chan *chan) | 1734 | static void l2cap_ertm_resend(struct l2cap_chan *chan) |
1707 | { | 1735 | { |
1708 | struct sk_buff *skb, *tx_skb; | 1736 | struct l2cap_ctrl control; |
1709 | u16 fcs; | 1737 | struct sk_buff *skb; |
1710 | u32 control; | 1738 | struct sk_buff *tx_skb; |
1711 | int nsent = 0; | 1739 | u16 seq; |
1712 | 1740 | ||
1713 | if (chan->state != BT_CONNECTED) | 1741 | BT_DBG("chan %p", chan); |
1714 | return -ENOTCONN; | ||
1715 | 1742 | ||
1716 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) | 1743 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
1717 | return 0; | 1744 | return; |
1718 | 1745 | ||
1719 | while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { | 1746 | while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { |
1747 | seq = l2cap_seq_list_pop(&chan->retrans_list); | ||
1720 | 1748 | ||
1721 | if (bt_cb(skb)->control.retries == chan->remote_max_tx && | 1749 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); |
1722 | chan->remote_max_tx) { | 1750 | if (!skb) { |
1723 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); | 1751 | BT_DBG("Error: Can't retransmit seq %d, frame missing", |
1724 | break; | 1752 | seq); |
1753 | continue; | ||
1725 | } | 1754 | } |
1726 | 1755 | ||
1727 | tx_skb = skb_clone(skb, GFP_ATOMIC); | ||
1728 | |||
1729 | bt_cb(skb)->control.retries++; | 1756 | bt_cb(skb)->control.retries++; |
1757 | control = bt_cb(skb)->control; | ||
1730 | 1758 | ||
1731 | control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); | 1759 | if (chan->max_tx != 0 && |
1732 | control &= __get_sar_mask(chan); | 1760 | bt_cb(skb)->control.retries > chan->max_tx) { |
1761 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); | ||
1762 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
1763 | l2cap_seq_list_clear(&chan->retrans_list); | ||
1764 | break; | ||
1765 | } | ||
1733 | 1766 | ||
1767 | control.reqseq = chan->buffer_seq; | ||
1734 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 1768 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
1735 | control |= __set_ctrl_final(chan); | 1769 | control.final = 1; |
1770 | else | ||
1771 | control.final = 0; | ||
1736 | 1772 | ||
1737 | control |= __set_reqseq(chan, chan->buffer_seq); | 1773 | if (skb_cloned(skb)) { |
1738 | control |= __set_txseq(chan, chan->next_tx_seq); | 1774 | /* Cloned sk_buffs are read-only, so we need a |
1739 | control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); | 1775 | * writeable copy |
1776 | */ | ||
1777 | tx_skb = skb_copy(skb, GFP_ATOMIC); | ||
1778 | } else { | ||
1779 | tx_skb = skb_clone(skb, GFP_ATOMIC); | ||
1780 | } | ||
1740 | 1781 | ||
1741 | __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); | 1782 | if (!tx_skb) { |
1783 | l2cap_seq_list_clear(&chan->retrans_list); | ||
1784 | break; | ||
1785 | } | ||
1786 | |||
1787 | /* Update skb contents */ | ||
1788 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { | ||
1789 | put_unaligned_le32(__pack_extended_control(&control), | ||
1790 | tx_skb->data + L2CAP_HDR_SIZE); | ||
1791 | } else { | ||
1792 | put_unaligned_le16(__pack_enhanced_control(&control), | ||
1793 | tx_skb->data + L2CAP_HDR_SIZE); | ||
1794 | } | ||
1742 | 1795 | ||
1743 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1796 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1744 | fcs = crc16(0, (u8 *)skb->data, | 1797 | u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len); |
1745 | tx_skb->len - L2CAP_FCS_SIZE); | 1798 | put_unaligned_le16(fcs, skb_put(tx_skb, |
1746 | put_unaligned_le16(fcs, skb->data + | 1799 | L2CAP_FCS_SIZE)); |
1747 | tx_skb->len - L2CAP_FCS_SIZE); | ||
1748 | } | 1800 | } |
1749 | 1801 | ||
1750 | l2cap_do_send(chan, tx_skb); | 1802 | l2cap_do_send(chan, tx_skb); |
1751 | 1803 | ||
1752 | __set_retrans_timer(chan); | 1804 | BT_DBG("Resent txseq %d", control.txseq); |
1753 | |||
1754 | bt_cb(skb)->control.txseq = chan->next_tx_seq; | ||
1755 | |||
1756 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); | ||
1757 | |||
1758 | if (bt_cb(skb)->control.retries == 1) { | ||
1759 | chan->unacked_frames++; | ||
1760 | |||
1761 | if (!nsent++) | ||
1762 | __clear_ack_timer(chan); | ||
1763 | } | ||
1764 | |||
1765 | chan->frames_sent++; | ||
1766 | 1805 | ||
1767 | if (skb_queue_is_last(&chan->tx_q, skb)) | 1806 | chan->last_acked_seq = chan->buffer_seq; |
1768 | chan->tx_send_head = NULL; | ||
1769 | else | ||
1770 | chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); | ||
1771 | } | 1807 | } |
1772 | |||
1773 | return nsent; | ||
1774 | } | 1808 | } |
1775 | 1809 | ||
1776 | static int l2cap_retransmit_frames(struct l2cap_chan *chan) | 1810 | static void l2cap_retransmit(struct l2cap_chan *chan, |
1811 | struct l2cap_ctrl *control) | ||
1777 | { | 1812 | { |
1778 | int ret; | 1813 | BT_DBG("chan %p, control %p", chan, control); |
1779 | |||
1780 | if (!skb_queue_empty(&chan->tx_q)) | ||
1781 | chan->tx_send_head = chan->tx_q.next; | ||
1782 | 1814 | ||
1783 | chan->next_tx_seq = chan->expected_ack_seq; | 1815 | l2cap_seq_list_append(&chan->retrans_list, control->reqseq); |
1784 | ret = l2cap_ertm_send(chan); | 1816 | l2cap_ertm_resend(chan); |
1785 | return ret; | ||
1786 | } | 1817 | } |
1787 | 1818 | ||
1788 | static void __l2cap_send_ack(struct l2cap_chan *chan) | 1819 | static void l2cap_retransmit_all(struct l2cap_chan *chan, |
1820 | struct l2cap_ctrl *control) | ||
1789 | { | 1821 | { |
1790 | u32 control = 0; | 1822 | struct sk_buff *skb; |
1791 | 1823 | ||
1792 | control |= __set_reqseq(chan, chan->buffer_seq); | 1824 | BT_DBG("chan %p, control %p", chan, control); |
1793 | 1825 | ||
1794 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 1826 | if (control->poll) |
1795 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); | 1827 | set_bit(CONN_SEND_FBIT, &chan->conn_state); |
1796 | set_bit(CONN_RNR_SENT, &chan->conn_state); | ||
1797 | l2cap_send_sframe(chan, control); | ||
1798 | return; | ||
1799 | } | ||
1800 | 1828 | ||
1801 | if (l2cap_ertm_send(chan) > 0) | 1829 | l2cap_seq_list_clear(&chan->retrans_list); |
1830 | |||
1831 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) | ||
1802 | return; | 1832 | return; |
1803 | 1833 | ||
1804 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); | 1834 | if (chan->unacked_frames) { |
1805 | l2cap_send_sframe(chan, control); | 1835 | skb_queue_walk(&chan->tx_q, skb) { |
1836 | if (bt_cb(skb)->control.txseq == control->reqseq || | ||
1837 | skb == chan->tx_send_head) | ||
1838 | break; | ||
1839 | } | ||
1840 | |||
1841 | skb_queue_walk_from(&chan->tx_q, skb) { | ||
1842 | if (skb == chan->tx_send_head) | ||
1843 | break; | ||
1844 | |||
1845 | l2cap_seq_list_append(&chan->retrans_list, | ||
1846 | bt_cb(skb)->control.txseq); | ||
1847 | } | ||
1848 | |||
1849 | l2cap_ertm_resend(chan); | ||
1850 | } | ||
1806 | } | 1851 | } |
1807 | 1852 | ||
1808 | static void l2cap_send_ack(struct l2cap_chan *chan) | 1853 | static void l2cap_send_ack(struct l2cap_chan *chan) |
1809 | { | 1854 | { |
1810 | __clear_ack_timer(chan); | 1855 | struct l2cap_ctrl control; |
1811 | __l2cap_send_ack(chan); | 1856 | u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, |
1812 | } | 1857 | chan->last_acked_seq); |
1858 | int threshold; | ||
1813 | 1859 | ||
1814 | static void l2cap_send_srejtail(struct l2cap_chan *chan) | 1860 | BT_DBG("chan %p last_acked_seq %d buffer_seq %d", |
1815 | { | 1861 | chan, chan->last_acked_seq, chan->buffer_seq); |
1816 | struct srej_list *tail; | ||
1817 | u32 control; | ||
1818 | 1862 | ||
1819 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); | 1863 | memset(&control, 0, sizeof(control)); |
1820 | control |= __set_ctrl_final(chan); | 1864 | control.sframe = 1; |
1821 | 1865 | ||
1822 | tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); | 1866 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && |
1823 | control |= __set_reqseq(chan, tail->tx_seq); | 1867 | chan->rx_state == L2CAP_RX_STATE_RECV) { |
1868 | __clear_ack_timer(chan); | ||
1869 | control.super = L2CAP_SUPER_RNR; | ||
1870 | control.reqseq = chan->buffer_seq; | ||
1871 | l2cap_send_sframe(chan, &control); | ||
1872 | } else { | ||
1873 | if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { | ||
1874 | l2cap_ertm_send(chan); | ||
1875 | /* If any i-frames were sent, they included an ack */ | ||
1876 | if (chan->buffer_seq == chan->last_acked_seq) | ||
1877 | frames_to_ack = 0; | ||
1878 | } | ||
1824 | 1879 | ||
1825 | l2cap_send_sframe(chan, control); | 1880 | /* Ack now if the tx window is 3/4ths full. |
1881 | * Calculate without mul or div | ||
1882 | */ | ||
1883 | threshold = chan->tx_win; | ||
1884 | threshold += threshold << 1; | ||
1885 | threshold >>= 2; | ||
1886 | |||
1887 | BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack, | ||
1888 | threshold); | ||
1889 | |||
1890 | if (frames_to_ack >= threshold) { | ||
1891 | __clear_ack_timer(chan); | ||
1892 | control.super = L2CAP_SUPER_RR; | ||
1893 | control.reqseq = chan->buffer_seq; | ||
1894 | l2cap_send_sframe(chan, &control); | ||
1895 | frames_to_ack = 0; | ||
1896 | } | ||
1897 | |||
1898 | if (frames_to_ack) | ||
1899 | __set_ack_timer(chan); | ||
1900 | } | ||
1826 | } | 1901 | } |
1827 | 1902 | ||
1828 | static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, | 1903 | static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, |
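A note on the ack threshold in l2cap_send_ack() above: the shift-and-add pair computes roughly three quarters of the transmit window without a multiply or divide, i.e. threshold = (tx_win + 2*tx_win) >> 2 = 3*tx_win/4 rounded down. A tiny stand-alone check of that identity, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int tx_win;

	for (tx_win = 1; tx_win <= 63; tx_win++) {
		unsigned int threshold = tx_win;

		threshold += threshold << 1;	/* threshold = 3 * tx_win */
		threshold >>= 2;		/* threshold = 3 * tx_win / 4 */

		if (threshold != (3 * tx_win) / 4)
			printf("mismatch at tx_win=%u\n", tx_win);
	}

	/* e.g. a 63-frame enhanced-control window triggers an ack at 47 frames */
	printf("tx_win=63 -> threshold %u\n", (3u * 63) / 4);
	return 0;
}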
@@ -1951,10 +2026,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1951 | if (!conn) | 2026 | if (!conn) |
1952 | return ERR_PTR(-ENOTCONN); | 2027 | return ERR_PTR(-ENOTCONN); |
1953 | 2028 | ||
1954 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | 2029 | hlen = __ertm_hdr_size(chan); |
1955 | hlen = L2CAP_EXT_HDR_SIZE; | ||
1956 | else | ||
1957 | hlen = L2CAP_ENH_HDR_SIZE; | ||
1958 | 2030 | ||
1959 | if (sdulen) | 2031 | if (sdulen) |
1960 | hlen += L2CAP_SDULEN_SIZE; | 2032 | hlen += L2CAP_SDULEN_SIZE; |
@@ -1974,7 +2046,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1974 | lh->cid = cpu_to_le16(chan->dcid); | 2046 | lh->cid = cpu_to_le16(chan->dcid); |
1975 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | 2047 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); |
1976 | 2048 | ||
1977 | __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); | 2049 | /* Control header is populated later */ |
2050 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
2051 | put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); | ||
2052 | else | ||
2053 | put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); | ||
1978 | 2054 | ||
1979 | if (sdulen) | 2055 | if (sdulen) |
1980 | put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); | 2056 | put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); |
@@ -1985,9 +2061,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1985 | return ERR_PTR(err); | 2061 | return ERR_PTR(err); |
1986 | } | 2062 | } |
1987 | 2063 | ||
1988 | if (chan->fcs == L2CAP_FCS_CRC16) | 2064 | bt_cb(skb)->control.fcs = chan->fcs; |
1989 | put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); | ||
1990 | |||
1991 | bt_cb(skb)->control.retries = 0; | 2065 | bt_cb(skb)->control.retries = 0; |
1992 | return skb; | 2066 | return skb; |
1993 | } | 2067 | } |
@@ -1999,7 +2073,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
1999 | struct sk_buff *skb; | 2073 | struct sk_buff *skb; |
2000 | u16 sdu_len; | 2074 | u16 sdu_len; |
2001 | size_t pdu_len; | 2075 | size_t pdu_len; |
2002 | int err = 0; | ||
2003 | u8 sar; | 2076 | u8 sar; |
2004 | 2077 | ||
2005 | BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); | 2078 | BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); |
@@ -2015,7 +2088,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
2015 | pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); | 2088 | pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); |
2016 | 2089 | ||
2017 | /* Adjust for largest possible L2CAP overhead. */ | 2090 | /* Adjust for largest possible L2CAP overhead. */ |
2018 | pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; | 2091 | if (chan->fcs) |
2092 | pdu_len -= L2CAP_FCS_SIZE; | ||
2093 | |||
2094 | pdu_len -= __ertm_hdr_size(chan); | ||
2019 | 2095 | ||
2020 | /* Remote device may have requested smaller PDUs */ | 2096 | /* Remote device may have requested smaller PDUs */ |
2021 | pdu_len = min_t(size_t, pdu_len, chan->remote_mps); | 2097 | pdu_len = min_t(size_t, pdu_len, chan->remote_mps); |
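A note on the overhead adjustment above: the per-PDU payload budget now drops the 2-byte FCS only when FCS is actually in use, then drops the enhanced or extended ERTM header (4-byte basic header plus 2- or 4-byte control), and is finally capped by the remote MPS. A back-of-the-envelope sketch with made-up example values (the 1021 and 672 figures are assumptions, not taken from the patch):

#include <stdio.h>
#include <stdbool.h>

#define DEMO_HDR_SIZE		4	/* basic L2CAP header */
#define DEMO_ENH_CTRL_SIZE	2
#define DEMO_EXT_CTRL_SIZE	4
#define DEMO_FCS_SIZE		2

static unsigned int demo_pdu_payload(unsigned int budget, bool ext_ctrl,
				     bool fcs_on, unsigned int remote_mps)
{
	if (fcs_on)
		budget -= DEMO_FCS_SIZE;

	budget -= DEMO_HDR_SIZE +
		  (ext_ctrl ? DEMO_EXT_CTRL_SIZE : DEMO_ENH_CTRL_SIZE);

	/* Remote device may have requested smaller PDUs */
	if (budget > remote_mps)
		budget = remote_mps;

	return budget;
}

int main(void)
{
	printf("payload per PDU: %u\n", demo_pdu_payload(1021, false, true, 672));
	return 0;
}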
@@ -2055,7 +2131,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
2055 | } | 2131 | } |
2056 | } | 2132 | } |
2057 | 2133 | ||
2058 | return err; | 2134 | return 0; |
2059 | } | 2135 | } |
2060 | 2136 | ||
2061 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, | 2137 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, |
@@ -2117,17 +2193,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, | |||
2117 | if (err) | 2193 | if (err) |
2118 | break; | 2194 | break; |
2119 | 2195 | ||
2120 | if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL) | ||
2121 | chan->tx_send_head = seg_queue.next; | ||
2122 | skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); | ||
2123 | |||
2124 | if (chan->mode == L2CAP_MODE_ERTM) | 2196 | if (chan->mode == L2CAP_MODE_ERTM) |
2125 | err = l2cap_ertm_send(chan); | 2197 | l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); |
2126 | else | 2198 | else |
2127 | l2cap_streaming_send(chan); | 2199 | l2cap_streaming_send(chan, &seg_queue); |
2128 | 2200 | ||
2129 | if (err >= 0) | 2201 | err = len; |
2130 | err = len; | ||
2131 | 2202 | ||
2132 | /* If the skbs were not queued for sending, they'll still be in | 2203 | /* If the skbs were not queued for sending, they'll still be in |
2133 | * seg_queue and need to be purged. | 2204 | * seg_queue and need to be purged. |
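A note on the new transmit path above: l2cap_chan_send() now hands ERTM traffic to l2cap_tx() with an L2CAP_EV_DATA_REQUEST event rather than splicing frames and calling l2cap_ertm_send() itself; the per-state handlers l2cap_tx_state_xmit() and l2cap_tx_state_wait_f() appear later in this patch. The dispatcher is only forward-declared in the hunks shown, so the following is a guess at its shape, not the patch's actual body:

/* Hypothetical sketch; assumes the declarations from l2cap_core.c above. */
static void demo_l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			  struct sk_buff_head *skbs, u8 event)
{
	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* ignore events in unexpected transmitter states */
		break;
	}
}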
@@ -2143,6 +2214,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, | |||
2143 | return err; | 2214 | return err; |
2144 | } | 2215 | } |
2145 | 2216 | ||
2217 | static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) | ||
2218 | { | ||
2219 | struct l2cap_ctrl control; | ||
2220 | u16 seq; | ||
2221 | |||
2222 | BT_DBG("chan %p, txseq %d", chan, txseq); | ||
2223 | |||
2224 | memset(&control, 0, sizeof(control)); | ||
2225 | control.sframe = 1; | ||
2226 | control.super = L2CAP_SUPER_SREJ; | ||
2227 | |||
2228 | for (seq = chan->expected_tx_seq; seq != txseq; | ||
2229 | seq = __next_seq(chan, seq)) { | ||
2230 | if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { | ||
2231 | control.reqseq = seq; | ||
2232 | l2cap_send_sframe(chan, &control); | ||
2233 | l2cap_seq_list_append(&chan->srej_list, seq); | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | chan->expected_tx_seq = __next_seq(chan, txseq); | ||
2238 | } | ||
2239 | |||
2240 | static void l2cap_send_srej_tail(struct l2cap_chan *chan) | ||
2241 | { | ||
2242 | struct l2cap_ctrl control; | ||
2243 | |||
2244 | BT_DBG("chan %p", chan); | ||
2245 | |||
2246 | if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) | ||
2247 | return; | ||
2248 | |||
2249 | memset(&control, 0, sizeof(control)); | ||
2250 | control.sframe = 1; | ||
2251 | control.super = L2CAP_SUPER_SREJ; | ||
2252 | control.reqseq = chan->srej_list.tail; | ||
2253 | l2cap_send_sframe(chan, &control); | ||
2254 | } | ||
2255 | |||
2256 | static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) | ||
2257 | { | ||
2258 | struct l2cap_ctrl control; | ||
2259 | u16 initial_head; | ||
2260 | u16 seq; | ||
2261 | |||
2262 | BT_DBG("chan %p, txseq %d", chan, txseq); | ||
2263 | |||
2264 | memset(&control, 0, sizeof(control)); | ||
2265 | control.sframe = 1; | ||
2266 | control.super = L2CAP_SUPER_SREJ; | ||
2267 | |||
2268 | /* Capture initial list head to allow only one pass through the list. */ | ||
2269 | initial_head = chan->srej_list.head; | ||
2270 | |||
2271 | do { | ||
2272 | seq = l2cap_seq_list_pop(&chan->srej_list); | ||
2273 | if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) | ||
2274 | break; | ||
2275 | |||
2276 | control.reqseq = seq; | ||
2277 | l2cap_send_sframe(chan, &control); | ||
2278 | l2cap_seq_list_append(&chan->srej_list, seq); | ||
2279 | } while (chan->srej_list.head != initial_head); | ||
2280 | } | ||
2281 | |||
2282 | static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) | ||
2283 | { | ||
2284 | struct sk_buff *acked_skb; | ||
2285 | u16 ackseq; | ||
2286 | |||
2287 | BT_DBG("chan %p, reqseq %d", chan, reqseq); | ||
2288 | |||
2289 | if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) | ||
2290 | return; | ||
2291 | |||
2292 | BT_DBG("expected_ack_seq %d, unacked_frames %d", | ||
2293 | chan->expected_ack_seq, chan->unacked_frames); | ||
2294 | |||
2295 | for (ackseq = chan->expected_ack_seq; ackseq != reqseq; | ||
2296 | ackseq = __next_seq(chan, ackseq)) { | ||
2297 | |||
2298 | acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); | ||
2299 | if (acked_skb) { | ||
2300 | skb_unlink(acked_skb, &chan->tx_q); | ||
2301 | kfree_skb(acked_skb); | ||
2302 | chan->unacked_frames--; | ||
2303 | } | ||
2304 | } | ||
2305 | |||
2306 | chan->expected_ack_seq = reqseq; | ||
2307 | |||
2308 | if (chan->unacked_frames == 0) | ||
2309 | __clear_retrans_timer(chan); | ||
2310 | |||
2311 | BT_DBG("unacked_frames %d", (int) chan->unacked_frames); | ||
2312 | } | ||
2313 | |||
2314 | static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) | ||
2315 | { | ||
2316 | BT_DBG("chan %p", chan); | ||
2317 | |||
2318 | chan->expected_tx_seq = chan->buffer_seq; | ||
2319 | l2cap_seq_list_clear(&chan->srej_list); | ||
2320 | skb_queue_purge(&chan->srej_q); | ||
2321 | chan->rx_state = L2CAP_RX_STATE_RECV; | ||
2322 | } | ||
2323 | |||
2324 | static void l2cap_tx_state_xmit(struct l2cap_chan *chan, | ||
2325 | struct l2cap_ctrl *control, | ||
2326 | struct sk_buff_head *skbs, u8 event) | ||
2327 | { | ||
2328 | BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, | ||
2329 | event); | ||
2330 | |||
2331 | switch (event) { | ||
2332 | case L2CAP_EV_DATA_REQUEST: | ||
2333 | if (chan->tx_send_head == NULL) | ||
2334 | chan->tx_send_head = skb_peek(skbs); | ||
2335 | |||
2336 | skb_queue_splice_tail_init(skbs, &chan->tx_q); | ||
2337 | l2cap_ertm_send(chan); | ||
2338 | break; | ||
2339 | case L2CAP_EV_LOCAL_BUSY_DETECTED: | ||
2340 | BT_DBG("Enter LOCAL_BUSY"); | ||
2341 | set_bit(CONN_LOCAL_BUSY, &chan->conn_state); | ||
2342 | |||
2343 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { | ||
2344 | /* The SREJ_SENT state must be aborted if we are to | ||
2345 | * enter the LOCAL_BUSY state. | ||
2346 | */ | ||
2347 | l2cap_abort_rx_srej_sent(chan); | ||
2348 | } | ||
2349 | |||
2350 | l2cap_send_ack(chan); | ||
2351 | |||
2352 | break; | ||
2353 | case L2CAP_EV_LOCAL_BUSY_CLEAR: | ||
2354 | BT_DBG("Exit LOCAL_BUSY"); | ||
2355 | clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); | ||
2356 | |||
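		/* If an RNR was sent while busy, poll the peer with an RR
		 * (P=1) and wait for its final bit in the WAIT_F state.
		 */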
2357 | if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { | ||
2358 | struct l2cap_ctrl local_control; | ||
2359 | |||
2360 | memset(&local_control, 0, sizeof(local_control)); | ||
2361 | local_control.sframe = 1; | ||
2362 | local_control.super = L2CAP_SUPER_RR; | ||
2363 | local_control.poll = 1; | ||
2364 | local_control.reqseq = chan->buffer_seq; | ||
2365 | l2cap_send_sframe(chan, &local_control); | ||
2366 | |||
2367 | chan->retry_count = 1; | ||
2368 | __set_monitor_timer(chan); | ||
2369 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; | ||
2370 | } | ||
2371 | break; | ||
2372 | case L2CAP_EV_RECV_REQSEQ_AND_FBIT: | ||
2373 | l2cap_process_reqseq(chan, control->reqseq); | ||
2374 | break; | ||
2375 | case L2CAP_EV_EXPLICIT_POLL: | ||
2376 | l2cap_send_rr_or_rnr(chan, 1); | ||
2377 | chan->retry_count = 1; | ||
2378 | __set_monitor_timer(chan); | ||
2379 | __clear_ack_timer(chan); | ||
2380 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; | ||
2381 | break; | ||
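	/* Retransmission timeout: poll the peer for its receive state
	 * rather than retransmitting immediately.
	 */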
2382 | case L2CAP_EV_RETRANS_TO: | ||
2383 | l2cap_send_rr_or_rnr(chan, 1); | ||
2384 | chan->retry_count = 1; | ||
2385 | __set_monitor_timer(chan); | ||
2386 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; | ||
2387 | break; | ||
2388 | case L2CAP_EV_RECV_FBIT: | ||
2389 | /* Nothing to process */ | ||
2390 | break; | ||
2391 | default: | ||
2392 | break; | ||
2393 | } | ||
2394 | } | ||
2395 | |||
2396 | static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, | ||
2397 | struct l2cap_ctrl *control, | ||
2398 | struct sk_buff_head *skbs, u8 event) | ||
2399 | { | ||
2400 | BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, | ||
2401 | event); | ||
2402 | |||
2403 | switch (event) { | ||
2404 | case L2CAP_EV_DATA_REQUEST: | ||
2405 | if (chan->tx_send_head == NULL) | ||
2406 | chan->tx_send_head = skb_peek(skbs); | ||
2407 | /* Queue data, but don't send. */ | ||
2408 | skb_queue_splice_tail_init(skbs, &chan->tx_q); | ||
2409 | break; | ||
2410 | case L2CAP_EV_LOCAL_BUSY_DETECTED: | ||
2411 | BT_DBG("Enter LOCAL_BUSY"); | ||
2412 | set_bit(CONN_LOCAL_BUSY, &chan->conn_state); | ||
2413 | |||
2414 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { | ||
2415 | /* The SREJ_SENT state must be aborted if we are to | ||
2416 | * enter the LOCAL_BUSY state. | ||
2417 | */ | ||
2418 | l2cap_abort_rx_srej_sent(chan); | ||
2419 | } | ||
2420 | |||
2421 | l2cap_send_ack(chan); | ||
2422 | |||
2423 | break; | ||
2424 | case L2CAP_EV_LOCAL_BUSY_CLEAR: | ||
2425 | BT_DBG("Exit LOCAL_BUSY"); | ||
2426 | clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); | ||
2427 | |||
2428 | if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { | ||
2429 | struct l2cap_ctrl local_control; | ||
2430 | memset(&local_control, 0, sizeof(local_control)); | ||
2431 | local_control.sframe = 1; | ||
2432 | local_control.super = L2CAP_SUPER_RR; | ||
2433 | local_control.poll = 1; | ||
2434 | local_control.reqseq = chan->buffer_seq; | ||
2435 | l2cap_send_sframe(chan, &local_control); | ||
2436 | |||
2437 | chan->retry_count = 1; | ||
2438 | __set_monitor_timer(chan); | ||
2439 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; | ||
2440 | } | ||
2441 | break; | ||
2442 | case L2CAP_EV_RECV_REQSEQ_AND_FBIT: | ||
2443 | l2cap_process_reqseq(chan, control->reqseq); | ||
2444 | |||
2445 | /* Fall through */ | ||
2446 | |||
2447 | case L2CAP_EV_RECV_FBIT: | ||
2448 | if (control && control->final) { | ||
2449 | __clear_monitor_timer(chan); | ||
2450 | if (chan->unacked_frames > 0) | ||
2451 | __set_retrans_timer(chan); | ||
2452 | chan->retry_count = 0; | ||
2453 | chan->tx_state = L2CAP_TX_STATE_XMIT; | ||
2454 | 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state); | ||
2455 | } | ||
2456 | break; | ||
2457 | case L2CAP_EV_EXPLICIT_POLL: | ||
2458 | /* Ignore */ | ||
2459 | break; | ||
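	/* Monitor timeout while waiting for the F-bit: repeat the poll
	 * until the retry limit (max_tx, 0 = unlimited) is exhausted,
	 * then disconnect.
	 */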
2460 | case L2CAP_EV_MONITOR_TO: | ||
2461 | if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { | ||
2462 | l2cap_send_rr_or_rnr(chan, 1); | ||
2463 | __set_monitor_timer(chan); | ||
2464 | chan->retry_count++; | ||
2465 | } else { | ||
2466 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); | ||
2467 | } | ||
2468 | break; | ||
2469 | default: | ||
2470 | break; | ||
2471 | } | ||
2472 | } | ||
2473 | |||
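/* Dispatch a transmit-path event to the handler for the current TX
 * state; events arriving in any other state are ignored.
 */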
2474 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, | ||
2475 | struct sk_buff_head *skbs, u8 event) | ||
2476 | { | ||
2477 | BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", | ||
2478 | chan, control, skbs, event, chan->tx_state); | ||
2479 | |||
2480 | switch (chan->tx_state) { | ||
2481 | case L2CAP_TX_STATE_XMIT: | ||
2482 | l2cap_tx_state_xmit(chan, control, skbs, event); | ||
2483 | break; | ||
2484 | case L2CAP_TX_STATE_WAIT_F: | ||
2485 | l2cap_tx_state_wait_f(chan, control, skbs, event); | ||
2486 | break; | ||
2487 | default: | ||
2488 | /* Ignore event */ | ||
2489 | break; | ||
2490 | } | ||
2491 | } | ||
2492 | |||
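/* Feed acknowledgement information (reqseq and the F-bit) from
 * received frames into the TX state machine.
 */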
2493 | static void l2cap_pass_to_tx(struct l2cap_chan *chan, | ||
2494 | struct l2cap_ctrl *control) | ||
2495 | { | ||
2496 | BT_DBG("chan %p, control %p", chan, control); | ||
2497 | l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); | ||
2498 | } | ||
2499 | |||
2500 | static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, | ||
2501 | struct l2cap_ctrl *control) | ||
2502 | { | ||
2503 | BT_DBG("chan %p, control %p", chan, control); | ||
2504 | l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); | ||
2505 | } | ||
2506 | |||
2146 | /* Copy frame to all raw sockets on that connection */ | 2507 | /* Copy frame to all raw sockets on that connection */ |
2147 | static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | 2508 | static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) |
2148 | { | 2509 | { |
@@ -2165,7 +2526,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2165 | if (!nskb) | 2526 | if (!nskb) |
2166 | continue; | 2527 | continue; |
2167 | 2528 | ||
2168 | if (chan->ops->recv(chan->data, nskb)) | 2529 | if (chan->ops->recv(chan, nskb)) |
2169 | kfree_skb(nskb); | 2530 | kfree_skb(nskb); |
2170 | } | 2531 | } |
2171 | 2532 | ||
@@ -2195,9 +2556,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, | |||
2195 | lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); | 2556 | lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); |
2196 | 2557 | ||
2197 | if (conn->hcon->type == LE_LINK) | 2558 | if (conn->hcon->type == LE_LINK) |
2198 | lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); | 2559 | lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING); |
2199 | else | 2560 | else |
2200 | lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); | 2561 | lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING); |
2201 | 2562 | ||
2202 | cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); | 2563 | cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); |
2203 | cmd->code = code; | 2564 | cmd->code = code; |
@@ -2309,8 +2670,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) | |||
2309 | efs.stype = chan->local_stype; | 2670 | efs.stype = chan->local_stype; |
2310 | efs.msdu = cpu_to_le16(chan->local_msdu); | 2671 | efs.msdu = cpu_to_le16(chan->local_msdu); |
2311 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); | 2672 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); |
2312 | efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); | 2673 | efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); |
2313 | efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); | 2674 | efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); |
2314 | break; | 2675 | break; |
2315 | 2676 | ||
2316 | case L2CAP_MODE_STREAMING: | 2677 | case L2CAP_MODE_STREAMING: |
@@ -2333,20 +2694,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) | |||
2333 | static void l2cap_ack_timeout(struct work_struct *work) | 2694 | static void l2cap_ack_timeout(struct work_struct *work) |
2334 | { | 2695 | { |
2335 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, | 2696 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
2336 | ack_timer.work); | 2697 | ack_timer.work); |
2698 | u16 frames_to_ack; | ||
2337 | 2699 | ||
2338 | BT_DBG("chan %p", chan); | 2700 | BT_DBG("chan %p", chan); |
2339 | 2701 | ||
2340 | l2cap_chan_lock(chan); | 2702 | l2cap_chan_lock(chan); |
2341 | 2703 | ||
2342 | __l2cap_send_ack(chan); | 2704 | frames_to_ack = __seq_offset(chan, chan->buffer_seq, |
2705 | chan->last_acked_seq); | ||
2343 | 2706 | ||
2344 | l2cap_chan_unlock(chan); | 2707 | if (frames_to_ack) |
2708 | l2cap_send_rr_or_rnr(chan, 0); | ||
2345 | 2709 | ||
2710 | l2cap_chan_unlock(chan); | ||
2346 | l2cap_chan_put(chan); | 2711 | l2cap_chan_put(chan); |
2347 | } | 2712 | } |
2348 | 2713 | ||
2349 | static inline int l2cap_ertm_init(struct l2cap_chan *chan) | 2714 | int l2cap_ertm_init(struct l2cap_chan *chan) |
2350 | { | 2715 | { |
2351 | int err; | 2716 | int err; |
2352 | 2717 | ||
@@ -2355,7 +2720,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan) | |||
2355 | chan->expected_ack_seq = 0; | 2720 | chan->expected_ack_seq = 0; |
2356 | chan->unacked_frames = 0; | 2721 | chan->unacked_frames = 0; |
2357 | chan->buffer_seq = 0; | 2722 | chan->buffer_seq = 0; |
2358 | chan->num_acked = 0; | ||
2359 | chan->frames_sent = 0; | 2723 | chan->frames_sent = 0; |
2360 | chan->last_acked_seq = 0; | 2724 | chan->last_acked_seq = 0; |
2361 | chan->sdu = NULL; | 2725 | chan->sdu = NULL; |
@@ -2376,12 +2740,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan) | |||
2376 | 2740 | ||
2377 | skb_queue_head_init(&chan->srej_q); | 2741 | skb_queue_head_init(&chan->srej_q); |
2378 | 2742 | ||
2379 | INIT_LIST_HEAD(&chan->srej_l); | ||
2380 | err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); | 2743 | err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); |
2381 | if (err < 0) | 2744 | if (err < 0) |
2382 | return err; | 2745 | return err; |
2383 | 2746 | ||
2384 | return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); | 2747 | err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); |
2748 | if (err < 0) | ||
2749 | l2cap_seq_list_free(&chan->srej_list); | ||
2750 | |||
2751 | return err; | ||
2385 | } | 2752 | } |
2386 | 2753 | ||
2387 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) | 2754 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) |
@@ -2507,6 +2874,7 @@ done: | |||
2507 | break; | 2874 | break; |
2508 | 2875 | ||
2509 | case L2CAP_MODE_STREAMING: | 2876 | case L2CAP_MODE_STREAMING: |
2877 | l2cap_txwin_setup(chan); | ||
2510 | rfc.mode = L2CAP_MODE_STREAMING; | 2878 | rfc.mode = L2CAP_MODE_STREAMING; |
2511 | rfc.txwin_size = 0; | 2879 | rfc.txwin_size = 0; |
2512 | rfc.max_transmit = 0; | 2880 | rfc.max_transmit = 0; |
@@ -2537,7 +2905,7 @@ done: | |||
2537 | } | 2905 | } |
2538 | 2906 | ||
2539 | req->dcid = cpu_to_le16(chan->dcid); | 2907 | req->dcid = cpu_to_le16(chan->dcid); |
2540 | req->flags = cpu_to_le16(0); | 2908 | req->flags = __constant_cpu_to_le16(0); |
2541 | 2909 | ||
2542 | return ptr - data; | 2910 | return ptr - data; |
2543 | } | 2911 | } |
@@ -2757,7 +3125,7 @@ done: | |||
2757 | } | 3125 | } |
2758 | rsp->scid = cpu_to_le16(chan->dcid); | 3126 | rsp->scid = cpu_to_le16(chan->dcid); |
2759 | rsp->result = cpu_to_le16(result); | 3127 | rsp->result = cpu_to_le16(result); |
2760 | rsp->flags = cpu_to_le16(0x0000); | 3128 | rsp->flags = __constant_cpu_to_le16(0); |
2761 | 3129 | ||
2762 | return ptr - data; | 3130 | return ptr - data; |
2763 | } | 3131 | } |
@@ -2856,7 +3224,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
2856 | } | 3224 | } |
2857 | 3225 | ||
2858 | req->dcid = cpu_to_le16(chan->dcid); | 3226 | req->dcid = cpu_to_le16(chan->dcid); |
2859 | req->flags = cpu_to_le16(0x0000); | 3227 | req->flags = __constant_cpu_to_le16(0); |
2860 | 3228 | ||
2861 | return ptr - data; | 3229 | return ptr - data; |
2862 | } | 3230 | } |
@@ -2883,8 +3251,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) | |||
2883 | 3251 | ||
2884 | rsp.scid = cpu_to_le16(chan->dcid); | 3252 | rsp.scid = cpu_to_le16(chan->dcid); |
2885 | rsp.dcid = cpu_to_le16(chan->scid); | 3253 | rsp.dcid = cpu_to_le16(chan->scid); |
2886 | rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); | 3254 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); |
2887 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); | 3255 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
2888 | l2cap_send_cmd(conn, chan->ident, | 3256 | l2cap_send_cmd(conn, chan->ident, |
2889 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 3257 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); |
2890 | 3258 | ||
@@ -2922,8 +3290,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) | |||
2922 | * did not send an RFC option. | 3290 | * did not send an RFC option. |
2923 | */ | 3291 | */ |
2924 | rfc.mode = chan->mode; | 3292 | rfc.mode = chan->mode; |
2925 | rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); | 3293 | rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); |
2926 | rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); | 3294 | rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); |
2927 | rfc.max_pdu_size = cpu_to_le16(chan->imtu); | 3295 | rfc.max_pdu_size = cpu_to_le16(chan->imtu); |
2928 | 3296 | ||
2929 | BT_ERR("Expected RFC option was not found, using defaults"); | 3297 | BT_ERR("Expected RFC option was not found, using defaults"); |
@@ -2986,7 +3354,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2986 | lock_sock(parent); | 3354 | lock_sock(parent); |
2987 | 3355 | ||
2988 | /* Check if the ACL is secure enough (if not SDP) */ | 3356 | /* Check if the ACL is secure enough (if not SDP) */ |
2989 | if (psm != cpu_to_le16(0x0001) && | 3357 | if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && |
2990 | !hci_conn_check_link_mode(conn->hcon)) { | 3358 | !hci_conn_check_link_mode(conn->hcon)) { |
2991 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; | 3359 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; |
2992 | result = L2CAP_CR_SEC_BLOCK; | 3360 | result = L2CAP_CR_SEC_BLOCK; |
@@ -2995,25 +3363,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2995 | 3363 | ||
2996 | result = L2CAP_CR_NO_MEM; | 3364 | result = L2CAP_CR_NO_MEM; |
2997 | 3365 | ||
2998 | /* Check for backlog size */ | 3366 | /* Check if we already have channel with that dcid */ |
2999 | if (sk_acceptq_is_full(parent)) { | 3367 | if (__l2cap_get_chan_by_dcid(conn, scid)) |
3000 | BT_DBG("backlog full %d", parent->sk_ack_backlog); | ||
3001 | goto response; | 3368 | goto response; |
3002 | } | ||
3003 | 3369 | ||
3004 | chan = pchan->ops->new_connection(pchan->data); | 3370 | chan = pchan->ops->new_connection(pchan); |
3005 | if (!chan) | 3371 | if (!chan) |
3006 | goto response; | 3372 | goto response; |
3007 | 3373 | ||
3008 | sk = chan->sk; | 3374 | sk = chan->sk; |
3009 | 3375 | ||
3010 | /* Check if we already have channel with that dcid */ | ||
3011 | if (__l2cap_get_chan_by_dcid(conn, scid)) { | ||
3012 | sock_set_flag(sk, SOCK_ZAPPED); | ||
3013 | chan->ops->close(chan->data); | ||
3014 | goto response; | ||
3015 | } | ||
3016 | |||
3017 | hci_conn_hold(conn->hcon); | 3376 | hci_conn_hold(conn->hcon); |
3018 | 3377 | ||
3019 | bacpy(&bt_sk(sk)->src, conn->src); | 3378 | bacpy(&bt_sk(sk)->src, conn->src); |
@@ -3067,7 +3426,7 @@ sendresp: | |||
3067 | 3426 | ||
3068 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { | 3427 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { |
3069 | struct l2cap_info_req info; | 3428 | struct l2cap_info_req info; |
3070 | info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | 3429 | info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); |
3071 | 3430 | ||
3072 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | 3431 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
3073 | conn->info_ident = l2cap_get_ident(conn); | 3432 | conn->info_ident = l2cap_get_ident(conn); |
@@ -3189,7 +3548,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3189 | if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { | 3548 | if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { |
3190 | struct l2cap_cmd_rej_cid rej; | 3549 | struct l2cap_cmd_rej_cid rej; |
3191 | 3550 | ||
3192 | rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); | 3551 | rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID); |
3193 | rej.scid = cpu_to_le16(chan->scid); | 3552 | rej.scid = cpu_to_le16(chan->scid); |
3194 | rej.dcid = cpu_to_le16(chan->dcid); | 3553 | rej.dcid = cpu_to_le16(chan->dcid); |
3195 | 3554 | ||
@@ -3211,11 +3570,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3211 | memcpy(chan->conf_req + chan->conf_len, req->data, len); | 3570 | memcpy(chan->conf_req + chan->conf_len, req->data, len); |
3212 | chan->conf_len += len; | 3571 | chan->conf_len += len; |
3213 | 3572 | ||
3214 | if (flags & 0x0001) { | 3573 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) { |
3215 | /* Incomplete config. Send empty response. */ | 3574 | /* Incomplete config. Send empty response. */ |
3216 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3575 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, |
3217 | l2cap_build_conf_rsp(chan, rsp, | 3576 | l2cap_build_conf_rsp(chan, rsp, |
3218 | L2CAP_CONF_SUCCESS, 0x0001), rsp); | 3577 | L2CAP_CONF_SUCCESS, flags), rsp); |
3219 | goto unlock; | 3578 | goto unlock; |
3220 | } | 3579 | } |
3221 | 3580 | ||
@@ -3238,8 +3597,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3238 | if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { | 3597 | if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { |
3239 | set_default_fcs(chan); | 3598 | set_default_fcs(chan); |
3240 | 3599 | ||
3241 | l2cap_state_change(chan, BT_CONNECTED); | ||
3242 | |||
3243 | if (chan->mode == L2CAP_MODE_ERTM || | 3600 | if (chan->mode == L2CAP_MODE_ERTM || |
3244 | chan->mode == L2CAP_MODE_STREAMING) | 3601 | chan->mode == L2CAP_MODE_STREAMING) |
3245 | err = l2cap_ertm_init(chan); | 3602 | err = l2cap_ertm_init(chan); |
@@ -3271,7 +3628,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3271 | 3628 | ||
3272 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3629 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, |
3273 | l2cap_build_conf_rsp(chan, rsp, | 3630 | l2cap_build_conf_rsp(chan, rsp, |
3274 | L2CAP_CONF_SUCCESS, 0x0000), rsp); | 3631 | L2CAP_CONF_SUCCESS, flags), rsp); |
3275 | } | 3632 | } |
3276 | 3633 | ||
3277 | unlock: | 3634 | unlock: |
@@ -3362,7 +3719,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3362 | goto done; | 3719 | goto done; |
3363 | } | 3720 | } |
3364 | 3721 | ||
3365 | if (flags & 0x01) | 3722 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) |
3366 | goto done; | 3723 | goto done; |
3367 | 3724 | ||
3368 | set_bit(CONF_INPUT_DONE, &chan->conf_state); | 3725 | set_bit(CONF_INPUT_DONE, &chan->conf_state); |
@@ -3370,7 +3727,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3370 | if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { | 3727 | if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { |
3371 | set_default_fcs(chan); | 3728 | set_default_fcs(chan); |
3372 | 3729 | ||
3373 | l2cap_state_change(chan, BT_CONNECTED); | ||
3374 | if (chan->mode == L2CAP_MODE_ERTM || | 3730 | if (chan->mode == L2CAP_MODE_ERTM || |
3375 | chan->mode == L2CAP_MODE_STREAMING) | 3731 | chan->mode == L2CAP_MODE_STREAMING) |
3376 | err = l2cap_ertm_init(chan); | 3732 | err = l2cap_ertm_init(chan); |
@@ -3424,7 +3780,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd | |||
3424 | 3780 | ||
3425 | l2cap_chan_unlock(chan); | 3781 | l2cap_chan_unlock(chan); |
3426 | 3782 | ||
3427 | chan->ops->close(chan->data); | 3783 | chan->ops->close(chan); |
3428 | l2cap_chan_put(chan); | 3784 | l2cap_chan_put(chan); |
3429 | 3785 | ||
3430 | mutex_unlock(&conn->chan_lock); | 3786 | mutex_unlock(&conn->chan_lock); |
@@ -3458,7 +3814,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd | |||
3458 | 3814 | ||
3459 | l2cap_chan_unlock(chan); | 3815 | l2cap_chan_unlock(chan); |
3460 | 3816 | ||
3461 | chan->ops->close(chan->data); | 3817 | chan->ops->close(chan); |
3462 | l2cap_chan_put(chan); | 3818 | l2cap_chan_put(chan); |
3463 | 3819 | ||
3464 | mutex_unlock(&conn->chan_lock); | 3820 | mutex_unlock(&conn->chan_lock); |
@@ -3479,8 +3835,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
3479 | u8 buf[8]; | 3835 | u8 buf[8]; |
3480 | u32 feat_mask = l2cap_feat_mask; | 3836 | u32 feat_mask = l2cap_feat_mask; |
3481 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; | 3837 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
3482 | rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | 3838 | rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); |
3483 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); | 3839 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); |
3484 | if (!disable_ertm) | 3840 | if (!disable_ertm) |
3485 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | 3841 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
3486 | | L2CAP_FEAT_FCS; | 3842 | | L2CAP_FEAT_FCS; |
@@ -3500,15 +3856,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
3500 | else | 3856 | else |
3501 | l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; | 3857 | l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; |
3502 | 3858 | ||
3503 | rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); | 3859 | rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
3504 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); | 3860 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); |
3505 | memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); | 3861 | memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); |
3506 | l2cap_send_cmd(conn, cmd->ident, | 3862 | l2cap_send_cmd(conn, cmd->ident, |
3507 | L2CAP_INFO_RSP, sizeof(buf), buf); | 3863 | L2CAP_INFO_RSP, sizeof(buf), buf); |
3508 | } else { | 3864 | } else { |
3509 | struct l2cap_info_rsp rsp; | 3865 | struct l2cap_info_rsp rsp; |
3510 | rsp.type = cpu_to_le16(type); | 3866 | rsp.type = cpu_to_le16(type); |
3511 | rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); | 3867 | rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); |
3512 | l2cap_send_cmd(conn, cmd->ident, | 3868 | l2cap_send_cmd(conn, cmd->ident, |
3513 | L2CAP_INFO_RSP, sizeof(rsp), &rsp); | 3869 | L2CAP_INFO_RSP, sizeof(rsp), &rsp); |
3514 | } | 3870 | } |
@@ -3548,7 +3904,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
3548 | 3904 | ||
3549 | if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { | 3905 | if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { |
3550 | struct l2cap_info_req req; | 3906 | struct l2cap_info_req req; |
3551 | req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); | 3907 | req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
3552 | 3908 | ||
3553 | conn->info_ident = l2cap_get_ident(conn); | 3909 | conn->info_ident = l2cap_get_ident(conn); |
3554 | 3910 | ||
@@ -3783,9 +4139,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, | |||
3783 | 4139 | ||
3784 | err = l2cap_check_conn_param(min, max, latency, to_multiplier); | 4140 | err = l2cap_check_conn_param(min, max, latency, to_multiplier); |
3785 | if (err) | 4141 | if (err) |
3786 | rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); | 4142 | rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); |
3787 | else | 4143 | else |
3788 | rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); | 4144 | rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); |
3789 | 4145 | ||
3790 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, | 4146 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, |
3791 | sizeof(rsp), &rsp); | 4147 | sizeof(rsp), &rsp); |
@@ -3933,7 +4289,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, | |||
3933 | BT_ERR("Wrong link type (%d)", err); | 4289 | BT_ERR("Wrong link type (%d)", err); |
3934 | 4290 | ||
3935 | /* FIXME: Map err to a valid reason */ | 4291 | /* FIXME: Map err to a valid reason */ |
3936 | rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); | 4292 | rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); |
3937 | l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); | 4293 | l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); |
3938 | } | 4294 | } |
3939 | 4295 | ||
@@ -3965,65 +4321,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) | |||
3965 | return 0; | 4321 | return 0; |
3966 | } | 4322 | } |
3967 | 4323 | ||
3968 | static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) | 4324 | static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) |
3969 | { | 4325 | { |
3970 | u32 control = 0; | 4326 | struct l2cap_ctrl control; |
3971 | 4327 | ||
3972 | chan->frames_sent = 0; | 4328 | BT_DBG("chan %p", chan); |
3973 | 4329 | ||
3974 | control |= __set_reqseq(chan, chan->buffer_seq); | 4330 | memset(&control, 0, sizeof(control)); |
4331 | control.sframe = 1; | ||
4332 | control.final = 1; | ||
4333 | control.reqseq = chan->buffer_seq; | ||
4334 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | ||
3975 | 4335 | ||
3976 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 4336 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
3977 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); | 4337 | control.super = L2CAP_SUPER_RNR; |
3978 | l2cap_send_sframe(chan, control); | 4338 | l2cap_send_sframe(chan, &control); |
3979 | set_bit(CONN_RNR_SENT, &chan->conn_state); | ||
3980 | } | 4339 | } |
3981 | 4340 | ||
3982 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) | 4341 | if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && |
3983 | l2cap_retransmit_frames(chan); | 4342 | chan->unacked_frames > 0) |
4343 | __set_retrans_timer(chan); | ||
3984 | 4344 | ||
4345 | /* Send pending iframes */ | ||
3985 | l2cap_ertm_send(chan); | 4346 | l2cap_ertm_send(chan); |
3986 | 4347 | ||
3987 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && | 4348 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && |
3988 | chan->frames_sent == 0) { | 4349 | test_bit(CONN_SEND_FBIT, &chan->conn_state)) { |
3989 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); | 4350 | /* F-bit wasn't sent in an s-frame or i-frame yet, so |
3990 | l2cap_send_sframe(chan, control); | 4351 | * send it now. |
3991 | } | 4352 | */ |
3992 | } | 4353 | control.super = L2CAP_SUPER_RR; |
3993 | 4354 | l2cap_send_sframe(chan, &control); | |
3994 | static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) | ||
3995 | { | ||
3996 | struct sk_buff *next_skb; | ||
3997 | int tx_seq_offset, next_tx_seq_offset; | ||
3998 | |||
3999 | bt_cb(skb)->control.txseq = tx_seq; | ||
4000 | bt_cb(skb)->control.sar = sar; | ||
4001 | |||
4002 | next_skb = skb_peek(&chan->srej_q); | ||
4003 | |||
4004 | tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); | ||
4005 | |||
4006 | while (next_skb) { | ||
4007 | if (bt_cb(next_skb)->control.txseq == tx_seq) | ||
4008 | return -EINVAL; | ||
4009 | |||
4010 | next_tx_seq_offset = __seq_offset(chan, | ||
4011 | bt_cb(next_skb)->control.txseq, chan->buffer_seq); | ||
4012 | |||
4013 | if (next_tx_seq_offset > tx_seq_offset) { | ||
4014 | __skb_queue_before(&chan->srej_q, next_skb, skb); | ||
4015 | return 0; | ||
4016 | } | ||
4017 | |||
4018 | if (skb_queue_is_last(&chan->srej_q, next_skb)) | ||
4019 | next_skb = NULL; | ||
4020 | else | ||
4021 | next_skb = skb_queue_next(&chan->srej_q, next_skb); | ||
4022 | } | 4355 | } |
4023 | |||
4024 | __skb_queue_tail(&chan->srej_q, skb); | ||
4025 | |||
4026 | return 0; | ||
4027 | } | 4356 | } |
4028 | 4357 | ||
4029 | static void append_skb_frag(struct sk_buff *skb, | 4358 | static void append_skb_frag(struct sk_buff *skb, |
@@ -4045,16 +4374,17 @@ static void append_skb_frag(struct sk_buff *skb, | |||
4045 | skb->truesize += new_frag->truesize; | 4374 | skb->truesize += new_frag->truesize; |
4046 | } | 4375 | } |
4047 | 4376 | ||
4048 | static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) | 4377 | static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, |
4378 | struct l2cap_ctrl *control) | ||
4049 | { | 4379 | { |
4050 | int err = -EINVAL; | 4380 | int err = -EINVAL; |
4051 | 4381 | ||
4052 | switch (__get_ctrl_sar(chan, control)) { | 4382 | switch (control->sar) { |
4053 | case L2CAP_SAR_UNSEGMENTED: | 4383 | case L2CAP_SAR_UNSEGMENTED: |
4054 | if (chan->sdu) | 4384 | if (chan->sdu) |
4055 | break; | 4385 | break; |
4056 | 4386 | ||
4057 | err = chan->ops->recv(chan->data, skb); | 4387 | err = chan->ops->recv(chan, skb); |
4058 | break; | 4388 | break; |
4059 | 4389 | ||
4060 | case L2CAP_SAR_START: | 4390 | case L2CAP_SAR_START: |
@@ -4104,7 +4434,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3 | |||
4104 | if (chan->sdu->len != chan->sdu_len) | 4434 | if (chan->sdu->len != chan->sdu_len) |
4105 | break; | 4435 | break; |
4106 | 4436 | ||
4107 | err = chan->ops->recv(chan->data, chan->sdu); | 4437 | err = chan->ops->recv(chan, chan->sdu); |
4108 | 4438 | ||
4109 | if (!err) { | 4439 | if (!err) { |
4110 | /* Reassembly complete */ | 4440 | /* Reassembly complete */ |
@@ -4126,448 +4456,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3 | |||
4126 | return err; | 4456 | return err; |
4127 | } | 4457 | } |
4128 | 4458 | ||
4129 | static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) | 4459 | void l2cap_chan_busy(struct l2cap_chan *chan, int busy) |
4130 | { | 4460 | { |
4131 | BT_DBG("chan %p, Enter local busy", chan); | 4461 | u8 event; |
4132 | 4462 | ||
4133 | set_bit(CONN_LOCAL_BUSY, &chan->conn_state); | 4463 | if (chan->mode != L2CAP_MODE_ERTM) |
4134 | l2cap_seq_list_clear(&chan->srej_list); | 4464 | return; |
4135 | 4465 | ||
4136 | __set_ack_timer(chan); | 4466 | event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; |
4467 | l2cap_tx(chan, NULL, NULL, event); | ||
4137 | } | 4468 | } |
4138 | 4469 | ||
4139 | static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) | 4470 | static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) |
4140 | { | 4471 | { |
4141 | u32 control; | 4472 | int err = 0; |
4142 | 4473 | /* Pass sequential frames to l2cap_reassemble_sdu() | |
4143 | if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) | 4474 | * until a gap is encountered. |
4144 | goto done; | 4475 | */ |
4145 | 4476 | ||
4146 | control = __set_reqseq(chan, chan->buffer_seq); | 4477 | BT_DBG("chan %p", chan); |
4147 | control |= __set_ctrl_poll(chan); | ||
4148 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); | ||
4149 | l2cap_send_sframe(chan, control); | ||
4150 | chan->retry_count = 1; | ||
4151 | 4478 | ||
4152 | __clear_retrans_timer(chan); | 4479 | while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
4153 | __set_monitor_timer(chan); | 4480 | struct sk_buff *skb; |
4481 | BT_DBG("Searching for skb with txseq %d (queue len %d)", | ||
4482 | chan->buffer_seq, skb_queue_len(&chan->srej_q)); | ||
4154 | 4483 | ||
4155 | set_bit(CONN_WAIT_F, &chan->conn_state); | 4484 | skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); |
4156 | 4485 | ||
4157 | done: | 4486 | if (!skb) |
4158 | clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); | 4487 | break; |
4159 | clear_bit(CONN_RNR_SENT, &chan->conn_state); | ||
4160 | 4488 | ||
4161 | BT_DBG("chan %p, Exit local busy", chan); | 4489 | skb_unlink(skb, &chan->srej_q); |
4162 | } | 4490 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); |
4491 | err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); | ||
4492 | if (err) | ||
4493 | break; | ||
4494 | } | ||
4163 | 4495 | ||
4164 | void l2cap_chan_busy(struct l2cap_chan *chan, int busy) | 4496 | if (skb_queue_empty(&chan->srej_q)) { |
4165 | { | 4497 | chan->rx_state = L2CAP_RX_STATE_RECV; |
4166 | if (chan->mode == L2CAP_MODE_ERTM) { | 4498 | l2cap_send_ack(chan); |
4167 | if (busy) | ||
4168 | l2cap_ertm_enter_local_busy(chan); | ||
4169 | else | ||
4170 | l2cap_ertm_exit_local_busy(chan); | ||
4171 | } | 4499 | } |
4500 | |||
4501 | return err; | ||
4172 | } | 4502 | } |
4173 | 4503 | ||
4174 | static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) | 4504 | static void l2cap_handle_srej(struct l2cap_chan *chan, |
4505 | struct l2cap_ctrl *control) | ||
4175 | { | 4506 | { |
4176 | struct sk_buff *skb; | 4507 | struct sk_buff *skb; |
4177 | u32 control; | ||
4178 | 4508 | ||
4179 | while ((skb = skb_peek(&chan->srej_q)) && | 4509 | BT_DBG("chan %p, control %p", chan, control); |
4180 | !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | ||
4181 | int err; | ||
4182 | 4510 | ||
4183 | if (bt_cb(skb)->control.txseq != tx_seq) | 4511 | if (control->reqseq == chan->next_tx_seq) { |
4184 | break; | 4512 | BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); |
4513 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4514 | return; | ||
4515 | } | ||
4185 | 4516 | ||
4186 | skb = skb_dequeue(&chan->srej_q); | 4517 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); |
4187 | control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar); | ||
4188 | err = l2cap_reassemble_sdu(chan, skb, control); | ||
4189 | 4518 | ||
4190 | if (err < 0) { | 4519 | if (skb == NULL) { |
4191 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4520 | BT_DBG("Seq %d not available for retransmission", |
4192 | break; | 4521 | control->reqseq); |
4193 | } | 4522 | return; |
4523 | } | ||
4194 | 4524 | ||
4195 | chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); | 4525 | if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { |
4196 | tx_seq = __next_seq(chan, tx_seq); | 4526 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
4527 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4528 | return; | ||
4197 | } | 4529 | } |
4198 | } | ||
4199 | 4530 | ||
4200 | static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) | 4531 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
4201 | { | ||
4202 | struct srej_list *l, *tmp; | ||
4203 | u32 control; | ||
4204 | 4532 | ||
4205 | list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { | 4533 | if (control->poll) { |
4206 | if (l->tx_seq == tx_seq) { | 4534 | l2cap_pass_to_tx(chan, control); |
4207 | list_del(&l->list); | 4535 | |
4208 | kfree(l); | 4536 | set_bit(CONN_SEND_FBIT, &chan->conn_state); |
4209 | return; | 4537 | l2cap_retransmit(chan, control); |
4538 | l2cap_ertm_send(chan); | ||
4539 | |||
4540 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { | ||
4541 | set_bit(CONN_SREJ_ACT, &chan->conn_state); | ||
4542 | chan->srej_save_reqseq = control->reqseq; | ||
4543 | } | ||
4544 | } else { | ||
4545 | l2cap_pass_to_tx_fbit(chan, control); | ||
4546 | |||
4547 | if (control->final) { | ||
4548 | if (chan->srej_save_reqseq != control->reqseq || | ||
4549 | !test_and_clear_bit(CONN_SREJ_ACT, | ||
4550 | &chan->conn_state)) | ||
4551 | l2cap_retransmit(chan, control); | ||
4552 | } else { | ||
4553 | l2cap_retransmit(chan, control); | ||
4554 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { | ||
4555 | set_bit(CONN_SREJ_ACT, &chan->conn_state); | ||
4556 | chan->srej_save_reqseq = control->reqseq; | ||
4557 | } | ||
4210 | } | 4558 | } |
4211 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); | ||
4212 | control |= __set_reqseq(chan, l->tx_seq); | ||
4213 | l2cap_send_sframe(chan, control); | ||
4214 | list_del(&l->list); | ||
4215 | list_add_tail(&l->list, &chan->srej_l); | ||
4216 | } | 4559 | } |
4217 | } | 4560 | } |
4218 | 4561 | ||
4219 | static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) | 4562 | static void l2cap_handle_rej(struct l2cap_chan *chan, |
4563 | struct l2cap_ctrl *control) | ||
4220 | { | 4564 | { |
4221 | struct srej_list *new; | 4565 | struct sk_buff *skb; |
4222 | u32 control; | ||
4223 | |||
4224 | while (tx_seq != chan->expected_tx_seq) { | ||
4225 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); | ||
4226 | control |= __set_reqseq(chan, chan->expected_tx_seq); | ||
4227 | l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq); | ||
4228 | l2cap_send_sframe(chan, control); | ||
4229 | 4566 | ||
4230 | new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); | 4567 | BT_DBG("chan %p, control %p", chan, control); |
4231 | if (!new) | ||
4232 | return -ENOMEM; | ||
4233 | 4568 | ||
4234 | new->tx_seq = chan->expected_tx_seq; | 4569 | if (control->reqseq == chan->next_tx_seq) { |
4570 | BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); | ||
4571 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4572 | return; | ||
4573 | } | ||
4235 | 4574 | ||
4236 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); | 4575 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); |
4237 | 4576 | ||
4238 | list_add_tail(&new->list, &chan->srej_l); | 4577 | if (chan->max_tx && skb && |
4578 | bt_cb(skb)->control.retries >= chan->max_tx) { | ||
4579 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); | ||
4580 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4581 | return; | ||
4239 | } | 4582 | } |
4240 | 4583 | ||
4241 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); | 4584 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
4242 | 4585 | ||
4243 | return 0; | 4586 | l2cap_pass_to_tx(chan, control); |
4587 | |||
4588 | if (control->final) { | ||
4589 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | ||
4590 | l2cap_retransmit_all(chan, control); | ||
4591 | } else { | ||
4592 | l2cap_retransmit_all(chan, control); | ||
4593 | l2cap_ertm_send(chan); | ||
4594 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) | ||
4595 | set_bit(CONN_REJ_ACT, &chan->conn_state); | ||
4596 | } | ||
4244 | } | 4597 | } |
4245 | 4598 | ||
4246 | static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) | 4599 | static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) |
4247 | { | 4600 | { |
4248 | u16 tx_seq = __get_txseq(chan, rx_control); | 4601 | BT_DBG("chan %p, txseq %d", chan, txseq); |
4249 | u16 req_seq = __get_reqseq(chan, rx_control); | ||
4250 | u8 sar = __get_ctrl_sar(chan, rx_control); | ||
4251 | int tx_seq_offset, expected_tx_seq_offset; | ||
4252 | int num_to_ack = (chan->tx_win/6) + 1; | ||
4253 | int err = 0; | ||
4254 | 4602 | ||
4255 | BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, | 4603 | BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, |
4256 | tx_seq, rx_control); | 4604 | chan->expected_tx_seq); |
4257 | 4605 | ||
4258 | if (__is_ctrl_final(chan, rx_control) && | 4606 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { |
4259 | test_bit(CONN_WAIT_F, &chan->conn_state)) { | 4607 | if (__seq_offset(chan, txseq, chan->last_acked_seq) >= |
4260 | __clear_monitor_timer(chan); | 4608 | chan->tx_win) { |
4261 | if (chan->unacked_frames > 0) | 4609 | /* See notes below regarding "double poll" and |
4262 | __set_retrans_timer(chan); | 4610 | * invalid packets. |
4263 | clear_bit(CONN_WAIT_F, &chan->conn_state); | 4611 | */ |
4264 | } | 4612 | if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { |
4613 | BT_DBG("Invalid/Ignore - after SREJ"); | ||
4614 | return L2CAP_TXSEQ_INVALID_IGNORE; | ||
4615 | } else { | ||
4616 | BT_DBG("Invalid - in window after SREJ sent"); | ||
4617 | return L2CAP_TXSEQ_INVALID; | ||
4618 | } | ||
4619 | } | ||
4265 | 4620 | ||
4266 | chan->expected_ack_seq = req_seq; | 4621 | if (chan->srej_list.head == txseq) { |
4267 | l2cap_drop_acked_frames(chan); | 4622 | BT_DBG("Expected SREJ"); |
4623 | return L2CAP_TXSEQ_EXPECTED_SREJ; | ||
4624 | } | ||
4268 | 4625 | ||
4269 | tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); | 4626 | if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { |
4627 | BT_DBG("Duplicate SREJ - txseq already stored"); | ||
4628 | return L2CAP_TXSEQ_DUPLICATE_SREJ; | ||
4629 | } | ||
4270 | 4630 | ||
4271 | /* invalid tx_seq */ | 4631 | if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { |
4272 | if (tx_seq_offset >= chan->tx_win) { | 4632 | BT_DBG("Unexpected SREJ - not requested"); |
4273 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4633 | return L2CAP_TXSEQ_UNEXPECTED_SREJ; |
4274 | goto drop; | 4634 | } |
4275 | } | 4635 | } |
4276 | 4636 | ||
4277 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 4637 | if (chan->expected_tx_seq == txseq) { |
4278 | if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) | 4638 | if (__seq_offset(chan, txseq, chan->last_acked_seq) >= |
4279 | l2cap_send_ack(chan); | 4639 | chan->tx_win) { |
4280 | goto drop; | 4640 | BT_DBG("Invalid - txseq outside tx window"); |
4641 | return L2CAP_TXSEQ_INVALID; | ||
4642 | } else { | ||
4643 | BT_DBG("Expected"); | ||
4644 | return L2CAP_TXSEQ_EXPECTED; | ||
4645 | } | ||
4281 | } | 4646 | } |
4282 | 4647 | ||
4283 | if (tx_seq == chan->expected_tx_seq) | 4648 | if (__seq_offset(chan, txseq, chan->last_acked_seq) < |
4284 | goto expected; | 4649 | __seq_offset(chan, chan->expected_tx_seq, |
4650 | chan->last_acked_seq)){ | ||
4651 | BT_DBG("Duplicate - expected_tx_seq later than txseq"); | ||
4652 | return L2CAP_TXSEQ_DUPLICATE; | ||
4653 | } | ||
4654 | |||
4655 | if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { | ||
4656 | /* A source of invalid packets is a "double poll" condition, | ||
4657 | * where delays cause us to send multiple poll packets. If | ||
4658 | * the remote stack receives and processes both polls, | ||
4659 | * sequence numbers can wrap around in such a way that a | ||
4660 | * resent frame has a sequence number that looks like new data | ||
4661 | * with a sequence gap. This would trigger an erroneous SREJ | ||
4662 | * request. | ||
4663 | * | ||
4664 | * Fortunately, this is impossible with a tx window that's | ||
4665 | * less than half of the maximum sequence number, which allows | ||
4666 | * invalid frames to be safely ignored. | ||
4667 | * | ||
4668 | * With tx window sizes greater than half of the tx window | ||
4669 | * maximum, the frame is invalid and cannot be ignored. This | ||
4670 | * causes a disconnect. | ||
4671 | */ | ||
4672 | |||
4673 | if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { | ||
4674 | BT_DBG("Invalid/Ignore - txseq outside tx window"); | ||
4675 | return L2CAP_TXSEQ_INVALID_IGNORE; | ||
4676 | } else { | ||
4677 | BT_DBG("Invalid - txseq outside tx window"); | ||
4678 | return L2CAP_TXSEQ_INVALID; | ||
4679 | } | ||
4680 | } else { | ||
4681 | BT_DBG("Unexpected - txseq indicates missing frames"); | ||
4682 | return L2CAP_TXSEQ_UNEXPECTED; | ||
4683 | } | ||
4684 | } | ||
4285 | 4685 | ||
4286 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 4686 | static int l2cap_rx_state_recv(struct l2cap_chan *chan, |
4287 | struct srej_list *first; | 4687 | struct l2cap_ctrl *control, |
4688 | struct sk_buff *skb, u8 event) | ||
4689 | { | ||
4690 | int err = 0; | ||
4691 | bool skb_in_use = 0; | ||
4288 | 4692 | ||
4289 | first = list_first_entry(&chan->srej_l, | 4693 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, |
4290 | struct srej_list, list); | 4694 | event); |
4291 | if (tx_seq == first->tx_seq) { | ||
4292 | l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); | ||
4293 | l2cap_check_srej_gap(chan, tx_seq); | ||
4294 | 4695 | ||
4295 | list_del(&first->list); | 4696 | switch (event) { |
4296 | kfree(first); | 4697 | case L2CAP_EV_RECV_IFRAME: |
4698 | switch (l2cap_classify_txseq(chan, control->txseq)) { | ||
4699 | case L2CAP_TXSEQ_EXPECTED: | ||
4700 | l2cap_pass_to_tx(chan, control); | ||
4297 | 4701 | ||
4298 | if (list_empty(&chan->srej_l)) { | 4702 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
4299 | chan->buffer_seq = chan->buffer_seq_srej; | 4703 | BT_DBG("Busy, discarding expected seq %d", |
4300 | clear_bit(CONN_SREJ_SENT, &chan->conn_state); | 4704 | control->txseq); |
4301 | l2cap_send_ack(chan); | 4705 | break; |
4302 | BT_DBG("chan %p, Exit SREJ_SENT", chan); | ||
4303 | } | 4706 | } |
4304 | } else { | ||
4305 | struct srej_list *l; | ||
4306 | 4707 | ||
4307 | /* duplicated tx_seq */ | 4708 | chan->expected_tx_seq = __next_seq(chan, |
4308 | if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) | 4709 | control->txseq); |
4309 | goto drop; | 4710 | |
4711 | chan->buffer_seq = chan->expected_tx_seq; | ||
4712 | skb_in_use = 1; | ||
4310 | 4713 | ||
4311 | list_for_each_entry(l, &chan->srej_l, list) { | 4714 | err = l2cap_reassemble_sdu(chan, skb, control); |
4312 | if (l->tx_seq == tx_seq) { | 4715 | if (err) |
4313 | l2cap_resend_srejframe(chan, tx_seq); | 4716 | break; |
4314 | return 0; | 4717 | |
4718 | if (control->final) { | ||
4719 | if (!test_and_clear_bit(CONN_REJ_ACT, | ||
4720 | &chan->conn_state)) { | ||
4721 | control->final = 0; | ||
4722 | l2cap_retransmit_all(chan, control); | ||
4723 | l2cap_ertm_send(chan); | ||
4315 | } | 4724 | } |
4316 | } | 4725 | } |
4317 | 4726 | ||
4318 | err = l2cap_send_srejframe(chan, tx_seq); | 4727 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) |
4319 | if (err < 0) { | 4728 | l2cap_send_ack(chan); |
4320 | l2cap_send_disconn_req(chan->conn, chan, -err); | 4729 | break; |
4321 | return err; | 4730 | case L2CAP_TXSEQ_UNEXPECTED: |
4731 | l2cap_pass_to_tx(chan, control); | ||
4732 | |||
4733 | /* Can't issue SREJ frames in the local busy state. | ||
4734 | * Drop this frame, it will be seen as missing | ||
4735 | * when local busy is exited. | ||
4736 | */ | ||
4737 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | ||
4738 | BT_DBG("Busy, discarding unexpected seq %d", | ||
4739 | control->txseq); | ||
4740 | break; | ||
4322 | } | 4741 | } |
4323 | } | ||
4324 | } else { | ||
4325 | expected_tx_seq_offset = __seq_offset(chan, | ||
4326 | chan->expected_tx_seq, chan->buffer_seq); | ||
4327 | 4742 | ||
4328 | /* duplicated tx_seq */ | 4743 | /* There was a gap in the sequence, so an SREJ |
4329 | if (tx_seq_offset < expected_tx_seq_offset) | 4744 | * must be sent for each missing frame. The |
4330 | goto drop; | 4745 | * current frame is stored for later use. |
4331 | 4746 | */ | |
4332 | set_bit(CONN_SREJ_SENT, &chan->conn_state); | 4747 | skb_queue_tail(&chan->srej_q, skb); |
4748 | skb_in_use = 1; | ||
4749 | BT_DBG("Queued %p (queue len %d)", skb, | ||
4750 | skb_queue_len(&chan->srej_q)); | ||
4333 | 4751 | ||
4334 | BT_DBG("chan %p, Enter SREJ", chan); | 4752 | clear_bit(CONN_SREJ_ACT, &chan->conn_state); |
4753 | l2cap_seq_list_clear(&chan->srej_list); | ||
4754 | l2cap_send_srej(chan, control->txseq); | ||
4335 | 4755 | ||
4336 | INIT_LIST_HEAD(&chan->srej_l); | 4756 | chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; |
4337 | chan->buffer_seq_srej = chan->buffer_seq; | 4757 | break; |
4758 | case L2CAP_TXSEQ_DUPLICATE: | ||
4759 | l2cap_pass_to_tx(chan, control); | ||
4760 | break; | ||
4761 | case L2CAP_TXSEQ_INVALID_IGNORE: | ||
4762 | break; | ||
4763 | case L2CAP_TXSEQ_INVALID: | ||
4764 | default: | ||
4765 | l2cap_send_disconn_req(chan->conn, chan, | ||
4766 | ECONNRESET); | ||
4767 | break; | ||
4768 | } | ||
4769 | break; | ||
4770 | case L2CAP_EV_RECV_RR: | ||
4771 | l2cap_pass_to_tx(chan, control); | ||
4772 | if (control->final) { | ||
4773 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4338 | 4774 | ||
4339 | __skb_queue_head_init(&chan->srej_q); | 4775 | if (!test_and_clear_bit(CONN_REJ_ACT, |
4340 | l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); | 4776 | &chan->conn_state)) { |
4777 | control->final = 0; | ||
4778 | l2cap_retransmit_all(chan, control); | ||
4779 | } | ||
4341 | 4780 | ||
4342 | /* Set P-bit only if there are some I-frames to ack. */ | 4781 | l2cap_ertm_send(chan); |
4343 | if (__clear_ack_timer(chan)) | 4782 | } else if (control->poll) { |
4344 | set_bit(CONN_SEND_PBIT, &chan->conn_state); | 4783 | l2cap_send_i_or_rr_or_rnr(chan); |
4784 | } else { | ||
4785 | if (test_and_clear_bit(CONN_REMOTE_BUSY, | ||
4786 | &chan->conn_state) && | ||
4787 | chan->unacked_frames) | ||
4788 | __set_retrans_timer(chan); | ||
4345 | 4789 | ||
4346 | err = l2cap_send_srejframe(chan, tx_seq); | 4790 | l2cap_ertm_send(chan); |
4347 | if (err < 0) { | ||
4348 | l2cap_send_disconn_req(chan->conn, chan, -err); | ||
4349 | return err; | ||
4350 | } | 4791 | } |
4792 | break; | ||
4793 | case L2CAP_EV_RECV_RNR: | ||
4794 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4795 | l2cap_pass_to_tx(chan, control); | ||
4796 | if (control && control->poll) { | ||
4797 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | ||
4798 | l2cap_send_rr_or_rnr(chan, 0); | ||
4799 | } | ||
4800 | __clear_retrans_timer(chan); | ||
4801 | l2cap_seq_list_clear(&chan->retrans_list); | ||
4802 | break; | ||
4803 | case L2CAP_EV_RECV_REJ: | ||
4804 | l2cap_handle_rej(chan, control); | ||
4805 | break; | ||
4806 | case L2CAP_EV_RECV_SREJ: | ||
4807 | l2cap_handle_srej(chan, control); | ||
4808 | break; | ||
4809 | default: | ||
4810 | break; | ||
4351 | } | 4811 | } |
4352 | return 0; | ||
4353 | |||
4354 | expected: | ||
4355 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); | ||
4356 | |||
4357 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | ||
4358 | bt_cb(skb)->control.txseq = tx_seq; | ||
4359 | bt_cb(skb)->control.sar = sar; | ||
4360 | __skb_queue_tail(&chan->srej_q, skb); | ||
4361 | return 0; | ||
4362 | } | ||
4363 | |||
4364 | err = l2cap_reassemble_sdu(chan, skb, rx_control); | ||
4365 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); | ||
4366 | 4812 | ||
4367 | if (err < 0) { | 4813 | if (skb && !skb_in_use) { |
4368 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4814 | BT_DBG("Freeing %p", skb); |
4369 | return err; | 4815 | kfree_skb(skb); |
4370 | } | 4816 | } |
4371 | 4817 | ||
4372 | if (__is_ctrl_final(chan, rx_control)) { | 4818 | return err; |
4373 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | 4819 | } |
4374 | l2cap_retransmit_frames(chan); | ||
4375 | } | ||
4376 | 4820 | ||
4821 | static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, | ||
4822 | struct l2cap_ctrl *control, | ||
4823 | struct sk_buff *skb, u8 event) | ||
4824 | { | ||
4825 | int err = 0; | ||
4826 | u16 txseq = control->txseq; | ||
4827 | bool skb_in_use = 0; | ||
4828 | |||
4829 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, | ||
4830 | event); | ||
4831 | |||
4832 | switch (event) { | ||
4833 | case L2CAP_EV_RECV_IFRAME: | ||
4834 | switch (l2cap_classify_txseq(chan, txseq)) { | ||
4835 | case L2CAP_TXSEQ_EXPECTED: | ||
4836 | /* Keep frame for reassembly later */ | ||
4837 | l2cap_pass_to_tx(chan, control); | ||
4838 | skb_queue_tail(&chan->srej_q, skb); | ||
4839 | skb_in_use = 1; | ||
4840 | BT_DBG("Queued %p (queue len %d)", skb, | ||
4841 | skb_queue_len(&chan->srej_q)); | ||
4842 | |||
4843 | chan->expected_tx_seq = __next_seq(chan, txseq); | ||
4844 | break; | ||
4845 | case L2CAP_TXSEQ_EXPECTED_SREJ: | ||
4846 | l2cap_seq_list_pop(&chan->srej_list); | ||
4377 | 4847 | ||
4378 | chan->num_acked = (chan->num_acked + 1) % num_to_ack; | 4848 | l2cap_pass_to_tx(chan, control); |
4379 | if (chan->num_acked == num_to_ack - 1) | 4849 | skb_queue_tail(&chan->srej_q, skb); |
4380 | l2cap_send_ack(chan); | 4850 | skb_in_use = 1; |
4381 | else | 4851 | BT_DBG("Queued %p (queue len %d)", skb, |
4382 | __set_ack_timer(chan); | 4852 | skb_queue_len(&chan->srej_q)); |
4383 | 4853 | ||
4384 | return 0; | 4854 | err = l2cap_rx_queued_iframes(chan); |
4855 | if (err) | ||
4856 | break; | ||
4385 | 4857 | ||
4386 | drop: | 4858 | break; |
4387 | kfree_skb(skb); | 4859 | case L2CAP_TXSEQ_UNEXPECTED: |
4388 | return 0; | 4860 | /* Got a frame that can't be reassembled yet. |
4389 | } | 4861 | * Save it for later, and send SREJs to cover |
4862 | * the missing frames. | ||
4863 | */ | ||
4864 | skb_queue_tail(&chan->srej_q, skb); | ||
4865 | skb_in_use = 1; | ||
4866 | BT_DBG("Queued %p (queue len %d)", skb, | ||
4867 | skb_queue_len(&chan->srej_q)); | ||
4868 | |||
4869 | l2cap_pass_to_tx(chan, control); | ||
4870 | l2cap_send_srej(chan, control->txseq); | ||
4871 | break; | ||
4872 | case L2CAP_TXSEQ_UNEXPECTED_SREJ: | ||
4873 | /* This frame was requested with an SREJ, but | ||
4874 | * some expected retransmitted frames are | ||
4875 | * missing. Request retransmission of missing | ||
4876 | * SREJ'd frames. | ||
4877 | */ | ||
4878 | skb_queue_tail(&chan->srej_q, skb); | ||
4879 | skb_in_use = 1; | ||
4880 | BT_DBG("Queued %p (queue len %d)", skb, | ||
4881 | skb_queue_len(&chan->srej_q)); | ||
4882 | |||
4883 | l2cap_pass_to_tx(chan, control); | ||
4884 | l2cap_send_srej_list(chan, control->txseq); | ||
4885 | break; | ||
4886 | case L2CAP_TXSEQ_DUPLICATE_SREJ: | ||
4887 | /* We've already queued this frame. Drop this copy. */ | ||
4888 | l2cap_pass_to_tx(chan, control); | ||
4889 | break; | ||
4890 | case L2CAP_TXSEQ_DUPLICATE: | ||
4891 | /* Expecting a later sequence number, so this frame | ||
4892 | * was already received. Ignore it completely. | ||
4893 | */ | ||
4894 | break; | ||
4895 | case L2CAP_TXSEQ_INVALID_IGNORE: | ||
4896 | break; | ||
4897 | case L2CAP_TXSEQ_INVALID: | ||
4898 | default: | ||
4899 | l2cap_send_disconn_req(chan->conn, chan, | ||
4900 | ECONNRESET); | ||
4901 | break; | ||
4902 | } | ||
4903 | break; | ||
4904 | case L2CAP_EV_RECV_RR: | ||
4905 | l2cap_pass_to_tx(chan, control); | ||
4906 | if (control->final) { | ||
4907 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4390 | 4908 | ||
4391 | static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) | 4909 | if (!test_and_clear_bit(CONN_REJ_ACT, |
4392 | { | 4910 | &chan->conn_state)) { |
4393 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, | 4911 | control->final = 0; |
4394 | __get_reqseq(chan, rx_control), rx_control); | 4912 | l2cap_retransmit_all(chan, control); |
4913 | } | ||
4395 | 4914 | ||
4396 | chan->expected_ack_seq = __get_reqseq(chan, rx_control); | 4915 | l2cap_ertm_send(chan); |
4397 | l2cap_drop_acked_frames(chan); | 4916 | } else if (control->poll) { |
4917 | if (test_and_clear_bit(CONN_REMOTE_BUSY, | ||
4918 | &chan->conn_state) && | ||
4919 | chan->unacked_frames) { | ||
4920 | __set_retrans_timer(chan); | ||
4921 | } | ||
4398 | 4922 | ||
4399 | if (__is_ctrl_poll(chan, rx_control)) { | 4923 | set_bit(CONN_SEND_FBIT, &chan->conn_state); |
4400 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | 4924 | l2cap_send_srej_tail(chan); |
4401 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 4925 | } else { |
4402 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && | 4926 | if (test_and_clear_bit(CONN_REMOTE_BUSY, |
4403 | (chan->unacked_frames > 0)) | 4927 | &chan->conn_state) && |
4928 | chan->unacked_frames) | ||
4404 | __set_retrans_timer(chan); | 4929 | __set_retrans_timer(chan); |
4405 | 4930 | ||
4406 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4931 | l2cap_send_ack(chan); |
4407 | l2cap_send_srejtail(chan); | 4932 | } |
4933 | break; | ||
4934 | case L2CAP_EV_RECV_RNR: | ||
4935 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4936 | l2cap_pass_to_tx(chan, control); | ||
4937 | if (control->poll) { | ||
4938 | l2cap_send_srej_tail(chan); | ||
4408 | } else { | 4939 | } else { |
4409 | l2cap_send_i_or_rr_or_rnr(chan); | 4940 | struct l2cap_ctrl rr_control; |
4941 | memset(&rr_control, 0, sizeof(rr_control)); | ||
4942 | rr_control.sframe = 1; | ||
4943 | rr_control.super = L2CAP_SUPER_RR; | ||
4944 | rr_control.reqseq = chan->buffer_seq; | ||
4945 | l2cap_send_sframe(chan, &rr_control); | ||
4410 | } | 4946 | } |
4411 | 4947 | ||
4412 | } else if (__is_ctrl_final(chan, rx_control)) { | 4948 | break; |
4413 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4949 | case L2CAP_EV_RECV_REJ: |
4414 | 4950 | l2cap_handle_rej(chan, control); | |
4415 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | 4951 | break; |
4416 | l2cap_retransmit_frames(chan); | 4952 | case L2CAP_EV_RECV_SREJ: |
4417 | 4953 | l2cap_handle_srej(chan, control); | |
4418 | } else { | 4954 | break; |
4419 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && | 4955 | } |
4420 | (chan->unacked_frames > 0)) | ||
4421 | __set_retrans_timer(chan); | ||
4422 | 4956 | ||
4423 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4957 | if (skb && !skb_in_use) { |
4424 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) | 4958 | BT_DBG("Freeing %p", skb); |
4425 | l2cap_send_ack(chan); | 4959 | kfree_skb(skb); |
4426 | else | ||
4427 | l2cap_ertm_send(chan); | ||
4428 | } | 4960 | } |
4961 | |||
4962 | return err; | ||
4429 | } | 4963 | } |
4430 | 4964 | ||
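An aside on the SREJ handling above: out-of-order I-frames are parked on srej_q while retransmission of the missing sequence numbers is requested. The sketch below is a minimal userspace illustration of that idea, not the kernel API; the rx struct, the held[] array and the SEQ_MOD value are invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MOD 64                      /* 6-bit, ERTM-style sequence space */

struct rx {
	unsigned expected;              /* next in-order sequence number */
	bool held[SEQ_MOD];             /* out-of-order frames parked here */
};

/* Returns true when the caller should ask the peer to resend 'expected'. */
static bool rx_frame(struct rx *r, unsigned txseq)
{
	if (txseq == r->expected) {     /* in order: deliver immediately */
		printf("deliver %u\n", txseq);
		r->expected = (r->expected + 1) % SEQ_MOD;

		/* flush frames that were parked while we waited for this one */
		while (r->held[r->expected]) {
			r->held[r->expected] = false;
			printf("deliver %u (from hold queue)\n", r->expected);
			r->expected = (r->expected + 1) % SEQ_MOD;
		}
		return false;
	}

	r->held[txseq % SEQ_MOD] = true; /* ahead of schedule: park it */
	return true;                     /* and request the missing frame */
}

int main(void)
{
	struct rx r = { .expected = 0 };
	unsigned in[] = { 0, 2, 3, 1 };  /* frame 1 arrives late */

	for (int i = 0; i < 4; i++)
		if (rx_frame(&r, in[i]))
			printf("send SREJ for %u\n", r.expected);
	return 0;
}

The real state machine is more careful than this sketch: it also remembers which sequence numbers it has already SREJ'd (the DUPLICATE_SREJ case above) so a retransmitted copy is not requested twice.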
4431 | static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) | 4965 | static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) |
4432 | { | 4966 | { |
4433 | u16 tx_seq = __get_reqseq(chan, rx_control); | 4967 | /* Make sure reqseq is for a packet that has been sent but not acked */ |
4434 | 4968 | u16 unacked; | |
4435 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); | ||
4436 | |||
4437 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4438 | |||
4439 | chan->expected_ack_seq = tx_seq; | ||
4440 | l2cap_drop_acked_frames(chan); | ||
4441 | |||
4442 | if (__is_ctrl_final(chan, rx_control)) { | ||
4443 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | ||
4444 | l2cap_retransmit_frames(chan); | ||
4445 | } else { | ||
4446 | l2cap_retransmit_frames(chan); | ||
4447 | 4969 | ||
4448 | if (test_bit(CONN_WAIT_F, &chan->conn_state)) | 4970 | unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); |
4449 | set_bit(CONN_REJ_ACT, &chan->conn_state); | 4971 | return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; |
4450 | } | ||
4451 | } | 4972 | } |
4452 | static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) | ||
4453 | { | ||
4454 | u16 tx_seq = __get_reqseq(chan, rx_control); | ||
4455 | |||
4456 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); | ||
4457 | |||
4458 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
4459 | 4973 | ||
4460 | if (__is_ctrl_poll(chan, rx_control)) { | 4974 | static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
4461 | chan->expected_ack_seq = tx_seq; | 4975 | struct sk_buff *skb, u8 event) |
4462 | l2cap_drop_acked_frames(chan); | 4976 | { |
4463 | 4977 | int err = 0; | |
4464 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | ||
4465 | l2cap_retransmit_one_frame(chan, tx_seq); | ||
4466 | 4978 | ||
4467 | l2cap_ertm_send(chan); | 4979 | BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, |
4980 | control, skb, event, chan->rx_state); | ||
4468 | 4981 | ||
4469 | if (test_bit(CONN_WAIT_F, &chan->conn_state)) { | 4982 | if (__valid_reqseq(chan, control->reqseq)) { |
4470 | chan->srej_save_reqseq = tx_seq; | 4983 | switch (chan->rx_state) { |
4471 | set_bit(CONN_SREJ_ACT, &chan->conn_state); | 4984 | case L2CAP_RX_STATE_RECV: |
4985 | err = l2cap_rx_state_recv(chan, control, skb, event); | ||
4986 | break; | ||
4987 | case L2CAP_RX_STATE_SREJ_SENT: | ||
4988 | err = l2cap_rx_state_srej_sent(chan, control, skb, | ||
4989 | event); | ||
4990 | break; | ||
4991 | default: | ||
4992 | /* shut it down */ | ||
4993 | break; | ||
4472 | } | 4994 | } |
4473 | } else if (__is_ctrl_final(chan, rx_control)) { | ||
4474 | if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && | ||
4475 | chan->srej_save_reqseq == tx_seq) | ||
4476 | clear_bit(CONN_SREJ_ACT, &chan->conn_state); | ||
4477 | else | ||
4478 | l2cap_retransmit_one_frame(chan, tx_seq); | ||
4479 | } else { | 4995 | } else { |
4480 | l2cap_retransmit_one_frame(chan, tx_seq); | 4996 | BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", |
4481 | if (test_bit(CONN_WAIT_F, &chan->conn_state)) { | 4997 | control->reqseq, chan->next_tx_seq, |
4482 | chan->srej_save_reqseq = tx_seq; | 4998 | chan->expected_ack_seq); |
4483 | set_bit(CONN_SREJ_ACT, &chan->conn_state); | 4999 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
4484 | } | ||
4485 | } | 5000 | } |
5001 | |||
5002 | return err; | ||
4486 | } | 5003 | } |
4487 | 5004 | ||
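__valid_reqseq(), used by l2cap_rx() above, only accepts an acknowledgement number that falls inside the window of frames sent but not yet acknowledged, computed with wrap-around subtraction in the sequence space. A standalone sketch of that check; SEQ_MOD and the helper names are assumptions for the example, not kernel definitions:

#include <assert.h>
#include <stdbool.h>

#define SEQ_MOD 64U   /* sequence numbers wrap modulo 64 in this sketch */

/* distance from b forward to a, with wrap-around;
 * both arguments are assumed already reduced modulo SEQ_MOD */
static unsigned seq_offset(unsigned a, unsigned b)
{
	return (a + SEQ_MOD - b) % SEQ_MOD;
}

/* reqseq acknowledges everything before it, so it must not point
 * past next_tx_seq nor behind expected_ack_seq */
static bool valid_reqseq(unsigned next_tx_seq, unsigned expected_ack_seq,
			 unsigned reqseq)
{
	unsigned unacked = seq_offset(next_tx_seq, expected_ack_seq);

	return seq_offset(next_tx_seq, reqseq) <= unacked;
}

int main(void)
{
	/* sent 60..63,0,1 (next_tx_seq = 2), nothing acked yet (= 60) */
	assert(valid_reqseq(2, 60, 61));   /* inside the unacked window */
	assert(valid_reqseq(2, 60, 2));    /* acks everything: still ok */
	assert(!valid_reqseq(2, 60, 10));  /* points at unsent data */
	return 0;
}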
4488 | static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) | 5005 | static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
5006 | struct sk_buff *skb) | ||
4489 | { | 5007 | { |
4490 | u16 tx_seq = __get_reqseq(chan, rx_control); | 5008 | int err = 0; |
4491 | 5009 | ||
4492 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); | 5010 | BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, |
5011 | chan->rx_state); | ||
4493 | 5012 | ||
4494 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 5013 | if (l2cap_classify_txseq(chan, control->txseq) == |
4495 | chan->expected_ack_seq = tx_seq; | 5014 | L2CAP_TXSEQ_EXPECTED) { |
4496 | l2cap_drop_acked_frames(chan); | 5015 | l2cap_pass_to_tx(chan, control); |
4497 | 5016 | ||
4498 | if (__is_ctrl_poll(chan, rx_control)) | 5017 | BT_DBG("buffer_seq %d->%d", chan->buffer_seq, |
4499 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | 5018 | __next_seq(chan, chan->buffer_seq)); |
4500 | 5019 | ||
4501 | if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 5020 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); |
4502 | __clear_retrans_timer(chan); | ||
4503 | if (__is_ctrl_poll(chan, rx_control)) | ||
4504 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); | ||
4505 | return; | ||
4506 | } | ||
4507 | 5021 | ||
4508 | if (__is_ctrl_poll(chan, rx_control)) { | 5022 | l2cap_reassemble_sdu(chan, skb, control); |
4509 | l2cap_send_srejtail(chan); | ||
4510 | } else { | 5023 | } else { |
4511 | rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); | 5024 | if (chan->sdu) { |
4512 | l2cap_send_sframe(chan, rx_control); | 5025 | kfree_skb(chan->sdu); |
4513 | } | 5026 | chan->sdu = NULL; |
4514 | } | 5027 | } |
4515 | 5028 | chan->sdu_last_frag = NULL; | |
4516 | static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) | 5029 | chan->sdu_len = 0; |
4517 | { | ||
4518 | BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); | ||
4519 | 5030 | ||
4520 | if (__is_ctrl_final(chan, rx_control) && | 5031 | if (skb) { |
4521 | test_bit(CONN_WAIT_F, &chan->conn_state)) { | 5032 | BT_DBG("Freeing %p", skb); |
4522 | __clear_monitor_timer(chan); | 5033 | kfree_skb(skb); |
4523 | if (chan->unacked_frames > 0) | 5034 | } |
4524 | __set_retrans_timer(chan); | ||
4525 | clear_bit(CONN_WAIT_F, &chan->conn_state); | ||
4526 | } | 5035 | } |
4527 | 5036 | ||
4528 | switch (__get_ctrl_super(chan, rx_control)) { | 5037 | chan->last_acked_seq = control->txseq; |
4529 | case L2CAP_SUPER_RR: | 5038 | chan->expected_tx_seq = __next_seq(chan, control->txseq); |
4530 | l2cap_data_channel_rrframe(chan, rx_control); | ||
4531 | break; | ||
4532 | 5039 | ||
4533 | case L2CAP_SUPER_REJ: | 5040 | return err; |
4534 | l2cap_data_channel_rejframe(chan, rx_control); | ||
4535 | break; | ||
4536 | |||
4537 | case L2CAP_SUPER_SREJ: | ||
4538 | l2cap_data_channel_srejframe(chan, rx_control); | ||
4539 | break; | ||
4540 | |||
4541 | case L2CAP_SUPER_RNR: | ||
4542 | l2cap_data_channel_rnrframe(chan, rx_control); | ||
4543 | break; | ||
4544 | } | ||
4545 | |||
4546 | kfree_skb(skb); | ||
4547 | return 0; | ||
4548 | } | 5041 | } |
4549 | 5042 | ||
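l2cap_stream_rx() above captures the streaming-mode policy: there is no retransmission, so a sequence gap discards whatever part of an SDU was already reassembled and resynchronises on the frame that actually arrived. A compact sketch of that policy, with illustrative struct and field names:

#include <stdio.h>

#define SEQ_MOD 64U

struct stream_rx {
	unsigned expected_tx_seq;
	unsigned sdu_bytes;          /* bytes of the partially rebuilt SDU */
};

static void stream_rx_frame(struct stream_rx *s, unsigned txseq, unsigned len)
{
	if (txseq != s->expected_tx_seq && s->sdu_bytes) {
		/* a frame was lost: the partial SDU can never be completed */
		printf("gap at %u, dropping %u buffered bytes\n",
		       s->expected_tx_seq, s->sdu_bytes);
		s->sdu_bytes = 0;
	}

	s->sdu_bytes += len;         /* (reassembly details omitted) */

	/* always resynchronise on the frame actually received */
	s->expected_tx_seq = (txseq + 1) % SEQ_MOD;
}

int main(void)
{
	struct stream_rx s = { 0, 0 };

	stream_rx_frame(&s, 0, 100);
	stream_rx_frame(&s, 2, 100);  /* frame 1 was lost */
	return 0;
}

The ERTM path reached through l2cap_rx() keeps such a gap open and recovers it with REJ/SREJ instead.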
4550 | static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) | 5043 | static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) |
4551 | { | 5044 | { |
4552 | u32 control; | 5045 | struct l2cap_ctrl *control = &bt_cb(skb)->control; |
4553 | u16 req_seq; | 5046 | u16 len; |
4554 | int len, next_tx_seq_offset, req_seq_offset; | 5047 | u8 event; |
4555 | 5048 | ||
4556 | __unpack_control(chan, skb); | 5049 | __unpack_control(chan, skb); |
4557 | 5050 | ||
4558 | control = __get_control(chan, skb->data); | ||
4559 | skb_pull(skb, __ctrl_size(chan)); | ||
4560 | len = skb->len; | 5051 | len = skb->len; |
4561 | 5052 | ||
4562 | /* | 5053 | /* |
4563 | * We can just drop the corrupted I-frame here. | 5054 | * We can just drop the corrupted I-frame here. |
4564 | * Receiver will miss it and start proper recovery | 5055 | * Receiver will miss it and start proper recovery |
4565 | * procedures and ask retransmission. | 5056 | * procedures and ask for retransmission. |
4566 | */ | 5057 | */ |
4567 | if (l2cap_check_fcs(chan, skb)) | 5058 | if (l2cap_check_fcs(chan, skb)) |
4568 | goto drop; | 5059 | goto drop; |
4569 | 5060 | ||
4570 | if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) | 5061 | if (!control->sframe && control->sar == L2CAP_SAR_START) |
4571 | len -= L2CAP_SDULEN_SIZE; | 5062 | len -= L2CAP_SDULEN_SIZE; |
4572 | 5063 | ||
4573 | if (chan->fcs == L2CAP_FCS_CRC16) | 5064 | if (chan->fcs == L2CAP_FCS_CRC16) |
@@ -4578,34 +5069,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) | |||
4578 | goto drop; | 5069 | goto drop; |
4579 | } | 5070 | } |
4580 | 5071 | ||
4581 | req_seq = __get_reqseq(chan, control); | 5072 | if (!control->sframe) { |
4582 | 5073 | int err; | |
4583 | req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); | ||
4584 | |||
4585 | next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, | ||
4586 | chan->expected_ack_seq); | ||
4587 | 5074 | ||
4588 | /* check for invalid req-seq */ | 5075 | BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", |
4589 | if (req_seq_offset > next_tx_seq_offset) { | 5076 | control->sar, control->reqseq, control->final, |
4590 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 5077 | control->txseq); |
4591 | goto drop; | ||
4592 | } | ||
4593 | 5078 | ||
4594 | if (!__is_sframe(chan, control)) { | 5079 | /* Validate F-bit - F=0 always valid, F=1 only |
4595 | if (len < 0) { | 5080 | * valid in TX WAIT_F |
4596 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 5081 | */ |
5082 | if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) | ||
4597 | goto drop; | 5083 | goto drop; |
5084 | |||
5085 | if (chan->mode != L2CAP_MODE_STREAMING) { | ||
5086 | event = L2CAP_EV_RECV_IFRAME; | ||
5087 | err = l2cap_rx(chan, control, skb, event); | ||
5088 | } else { | ||
5089 | err = l2cap_stream_rx(chan, control, skb); | ||
4598 | } | 5090 | } |
4599 | 5091 | ||
4600 | l2cap_data_channel_iframe(chan, control, skb); | 5092 | if (err) |
5093 | l2cap_send_disconn_req(chan->conn, chan, | ||
5094 | ECONNRESET); | ||
4601 | } else { | 5095 | } else { |
5096 | const u8 rx_func_to_event[4] = { | ||
5097 | L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, | ||
5098 | L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ | ||
5099 | }; | ||
5100 | |||
5101 | /* Only I-frames are expected in streaming mode */ | ||
5102 | if (chan->mode == L2CAP_MODE_STREAMING) | ||
5103 | goto drop; | ||
5104 | |||
5105 | BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", | ||
5106 | control->reqseq, control->final, control->poll, | ||
5107 | control->super); | ||
5108 | |||
4602 | if (len != 0) { | 5109 | if (len != 0) { |
4603 | BT_ERR("%d", len); | 5110 | BT_ERR("%d", len); |
4604 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 5111 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
4605 | goto drop; | 5112 | goto drop; |
4606 | } | 5113 | } |
4607 | 5114 | ||
4608 | l2cap_data_channel_sframe(chan, control, skb); | 5115 | /* Validate F and P bits */ |
5116 | if (control->final && (control->poll || | ||
5117 | chan->tx_state != L2CAP_TX_STATE_WAIT_F)) | ||
5118 | goto drop; | ||
5119 | |||
5120 | event = rx_func_to_event[control->super]; | ||
5121 | if (l2cap_rx(chan, control, skb, event)) | ||
5122 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4609 | } | 5123 | } |
4610 | 5124 | ||
4611 | return 0; | 5125 | return 0; |
@@ -4615,19 +5129,27 @@ drop: | |||
4615 | return 0; | 5129 | return 0; |
4616 | } | 5130 | } |
4617 | 5131 | ||
4618 | static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) | 5132 | static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, |
5133 | struct sk_buff *skb) | ||
4619 | { | 5134 | { |
4620 | struct l2cap_chan *chan; | 5135 | struct l2cap_chan *chan; |
4621 | u32 control; | ||
4622 | u16 tx_seq; | ||
4623 | int len; | ||
4624 | 5136 | ||
4625 | chan = l2cap_get_chan_by_scid(conn, cid); | 5137 | chan = l2cap_get_chan_by_scid(conn, cid); |
4626 | if (!chan) { | 5138 | if (!chan) { |
4627 | BT_DBG("unknown cid 0x%4.4x", cid); | 5139 | if (cid == L2CAP_CID_A2MP) { |
4628 | /* Drop packet and return */ | 5140 | chan = a2mp_channel_create(conn, skb); |
4629 | kfree_skb(skb); | 5141 | if (!chan) { |
4630 | return 0; | 5142 | kfree_skb(skb); |
5143 | return; | ||
5144 | } | ||
5145 | |||
5146 | l2cap_chan_lock(chan); | ||
5147 | } else { | ||
5148 | BT_DBG("unknown cid 0x%4.4x", cid); | ||
5149 | /* Drop packet and return */ | ||
5150 | kfree_skb(skb); | ||
5151 | return; | ||
5152 | } | ||
4631 | } | 5153 | } |
4632 | 5154 | ||
4633 | BT_DBG("chan %p, len %d", chan, skb->len); | 5155 | BT_DBG("chan %p, len %d", chan, skb->len); |
@@ -4645,49 +5167,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk | |||
4645 | if (chan->imtu < skb->len) | 5167 | if (chan->imtu < skb->len) |
4646 | goto drop; | 5168 | goto drop; |
4647 | 5169 | ||
4648 | if (!chan->ops->recv(chan->data, skb)) | 5170 | if (!chan->ops->recv(chan, skb)) |
4649 | goto done; | 5171 | goto done; |
4650 | break; | 5172 | break; |
4651 | 5173 | ||
4652 | case L2CAP_MODE_ERTM: | 5174 | case L2CAP_MODE_ERTM: |
4653 | l2cap_ertm_data_rcv(chan, skb); | ||
4654 | |||
4655 | goto done; | ||
4656 | |||
4657 | case L2CAP_MODE_STREAMING: | 5175 | case L2CAP_MODE_STREAMING: |
4658 | control = __get_control(chan, skb->data); | 5176 | l2cap_data_rcv(chan, skb); |
4659 | skb_pull(skb, __ctrl_size(chan)); | ||
4660 | len = skb->len; | ||
4661 | |||
4662 | if (l2cap_check_fcs(chan, skb)) | ||
4663 | goto drop; | ||
4664 | |||
4665 | if (__is_sar_start(chan, control)) | ||
4666 | len -= L2CAP_SDULEN_SIZE; | ||
4667 | |||
4668 | if (chan->fcs == L2CAP_FCS_CRC16) | ||
4669 | len -= L2CAP_FCS_SIZE; | ||
4670 | |||
4671 | if (len > chan->mps || len < 0 || __is_sframe(chan, control)) | ||
4672 | goto drop; | ||
4673 | |||
4674 | tx_seq = __get_txseq(chan, control); | ||
4675 | |||
4676 | if (chan->expected_tx_seq != tx_seq) { | ||
4677 | /* Frame(s) missing - must discard partial SDU */ | ||
4678 | kfree_skb(chan->sdu); | ||
4679 | chan->sdu = NULL; | ||
4680 | chan->sdu_last_frag = NULL; | ||
4681 | chan->sdu_len = 0; | ||
4682 | |||
4683 | /* TODO: Notify userland of missing data */ | ||
4684 | } | ||
4685 | |||
4686 | chan->expected_tx_seq = __next_seq(chan, tx_seq); | ||
4687 | |||
4688 | if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) | ||
4689 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4690 | |||
4691 | goto done; | 5177 | goto done; |
4692 | 5178 | ||
4693 | default: | 5179 | default: |
@@ -4700,11 +5186,10 @@ drop: | |||
4700 | 5186 | ||
4701 | done: | 5187 | done: |
4702 | l2cap_chan_unlock(chan); | 5188 | l2cap_chan_unlock(chan); |
4703 | |||
4704 | return 0; | ||
4705 | } | 5189 | } |
4706 | 5190 | ||
4707 | static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) | 5191 | static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, |
5192 | struct sk_buff *skb) | ||
4708 | { | 5193 | { |
4709 | struct l2cap_chan *chan; | 5194 | struct l2cap_chan *chan; |
4710 | 5195 | ||
@@ -4720,17 +5205,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str | |||
4720 | if (chan->imtu < skb->len) | 5205 | if (chan->imtu < skb->len) |
4721 | goto drop; | 5206 | goto drop; |
4722 | 5207 | ||
4723 | if (!chan->ops->recv(chan->data, skb)) | 5208 | if (!chan->ops->recv(chan, skb)) |
4724 | return 0; | 5209 | return; |
4725 | 5210 | ||
4726 | drop: | 5211 | drop: |
4727 | kfree_skb(skb); | 5212 | kfree_skb(skb); |
4728 | |||
4729 | return 0; | ||
4730 | } | 5213 | } |
4731 | 5214 | ||
4732 | static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, | 5215 | static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, |
4733 | struct sk_buff *skb) | 5216 | struct sk_buff *skb) |
4734 | { | 5217 | { |
4735 | struct l2cap_chan *chan; | 5218 | struct l2cap_chan *chan; |
4736 | 5219 | ||
@@ -4746,13 +5229,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, | |||
4746 | if (chan->imtu < skb->len) | 5229 | if (chan->imtu < skb->len) |
4747 | goto drop; | 5230 | goto drop; |
4748 | 5231 | ||
4749 | if (!chan->ops->recv(chan->data, skb)) | 5232 | if (!chan->ops->recv(chan, skb)) |
4750 | return 0; | 5233 | return; |
4751 | 5234 | ||
4752 | drop: | 5235 | drop: |
4753 | kfree_skb(skb); | 5236 | kfree_skb(skb); |
4754 | |||
4755 | return 0; | ||
4756 | } | 5237 | } |
4757 | 5238 | ||
4758 | static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) | 5239 | static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) |
@@ -4780,7 +5261,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) | |||
4780 | 5261 | ||
4781 | case L2CAP_CID_CONN_LESS: | 5262 | case L2CAP_CID_CONN_LESS: |
4782 | psm = get_unaligned((__le16 *) skb->data); | 5263 | psm = get_unaligned((__le16 *) skb->data); |
4783 | skb_pull(skb, 2); | 5264 | skb_pull(skb, L2CAP_PSMLEN_SIZE); |
4784 | l2cap_conless_channel(conn, psm, skb); | 5265 | l2cap_conless_channel(conn, psm, skb); |
4785 | break; | 5266 | break; |
4786 | 5267 | ||
@@ -4974,6 +5455,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
4974 | rsp.status = cpu_to_le16(stat); | 5455 | rsp.status = cpu_to_le16(stat); |
4975 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 5456 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
4976 | sizeof(rsp), &rsp); | 5457 | sizeof(rsp), &rsp); |
5458 | |||
5459 | if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && | ||
5460 | res == L2CAP_CR_SUCCESS) { | ||
5461 | char buf[128]; | ||
5462 | set_bit(CONF_REQ_SENT, &chan->conf_state); | ||
5463 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | ||
5464 | L2CAP_CONF_REQ, | ||
5465 | l2cap_build_conf_req(chan, buf), | ||
5466 | buf); | ||
5467 | chan->num_conf_req++; | ||
5468 | } | ||
4977 | } | 5469 | } |
4978 | 5470 | ||
4979 | l2cap_chan_unlock(chan); | 5471 | l2cap_chan_unlock(chan); |
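One structural point of the l2cap_core.c rework is visible in l2cap_data_rcv(): instead of one handler per S-frame type, the frame is validated first (F and P bits, no S-frames in streaming mode) and the 2-bit super field is then turned into a state-machine event through the rx_func_to_event[] table. A sketch of that table-driven dispatch; the enum values and the rx() stub are placeholders, not the kernel definitions:

#include <stdio.h>

enum sframe { SUPER_RR, SUPER_REJ, SUPER_RNR, SUPER_SREJ };
enum rx_event { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ };

/* one entry per possible value of the 2-bit super field */
static const enum rx_event super_to_event[4] = {
	[SUPER_RR]   = EV_RECV_RR,
	[SUPER_REJ]  = EV_RECV_REJ,
	[SUPER_RNR]  = EV_RECV_RNR,
	[SUPER_SREJ] = EV_RECV_SREJ,
};

static int rx(enum rx_event event)
{
	printf("state machine got event %d\n", event);
	return 0;
}

int main(void)
{
	unsigned super = SUPER_SREJ;   /* decoded from the control field */

	return rx(super_to_event[super & 3]);
}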
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 3bb1611b9d48..a4bb27e8427e 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | /* Bluetooth L2CAP sockets. */ | 28 | /* Bluetooth L2CAP sockets. */ |
29 | 29 | ||
30 | #include <linux/security.h> | ||
31 | #include <linux/export.h> | 30 | #include <linux/export.h> |
32 | 31 | ||
33 | #include <net/bluetooth/bluetooth.h> | 32 | #include <net/bluetooth/bluetooth.h> |
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) | |||
89 | if (err < 0) | 88 | if (err < 0) |
90 | goto done; | 89 | goto done; |
91 | 90 | ||
92 | if (__le16_to_cpu(la.l2_psm) == 0x0001 || | 91 | if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || |
93 | __le16_to_cpu(la.l2_psm) == 0x0003) | 92 | __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) |
94 | chan->sec_level = BT_SECURITY_SDP; | 93 | chan->sec_level = BT_SECURITY_SDP; |
95 | 94 | ||
96 | bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); | 95 | bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); |
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
446 | return err; | 445 | return err; |
447 | } | 446 | } |
448 | 447 | ||
448 | static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) | ||
449 | { | ||
450 | switch (chan->scid) { | ||
451 | case L2CAP_CID_LE_DATA: | ||
452 | if (mtu < L2CAP_LE_MIN_MTU) | ||
453 | return false; | ||
454 | break; | ||
455 | |||
456 | default: | ||
457 | if (mtu < L2CAP_DEFAULT_MIN_MTU) | ||
458 | return false; | ||
459 | } | ||
460 | |||
461 | return true; | ||
462 | } | ||
463 | |||
449 | static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) | 464 | static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) |
450 | { | 465 | { |
451 | struct sock *sk = sock->sk; | 466 | struct sock *sk = sock->sk; |
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
484 | break; | 499 | break; |
485 | } | 500 | } |
486 | 501 | ||
502 | if (!l2cap_valid_mtu(chan, opts.imtu)) { | ||
503 | err = -EINVAL; | ||
504 | break; | ||
505 | } | ||
506 | |||
487 | chan->mode = opts.mode; | 507 | chan->mode = opts.mode; |
488 | switch (chan->mode) { | 508 | switch (chan->mode) { |
489 | case L2CAP_MODE_BASIC: | 509 | case L2CAP_MODE_BASIC: |
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock) | |||
873 | return err; | 893 | return err; |
874 | } | 894 | } |
875 | 895 | ||
876 | static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) | 896 | static void l2cap_sock_cleanup_listen(struct sock *parent) |
877 | { | 897 | { |
878 | struct sock *sk, *parent = data; | 898 | struct sock *sk; |
899 | |||
900 | BT_DBG("parent %p", parent); | ||
901 | |||
902 | /* Close not yet accepted channels */ | ||
903 | while ((sk = bt_accept_dequeue(parent, NULL))) { | ||
904 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | ||
905 | |||
906 | l2cap_chan_lock(chan); | ||
907 | __clear_chan_timer(chan); | ||
908 | l2cap_chan_close(chan, ECONNRESET); | ||
909 | l2cap_chan_unlock(chan); | ||
910 | |||
911 | l2cap_sock_kill(sk); | ||
912 | } | ||
913 | } | ||
914 | |||
915 | static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) | ||
916 | { | ||
917 | struct sock *sk, *parent = chan->data; | ||
918 | |||
919 | /* Check for backlog size */ | ||
920 | if (sk_acceptq_is_full(parent)) { | ||
921 | BT_DBG("backlog full %d", parent->sk_ack_backlog); | ||
922 | return NULL; | ||
923 | } | ||
879 | 924 | ||
880 | sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, | 925 | sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, |
881 | GFP_ATOMIC); | 926 | GFP_ATOMIC); |
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) | |||
889 | return l2cap_pi(sk)->chan; | 934 | return l2cap_pi(sk)->chan; |
890 | } | 935 | } |
891 | 936 | ||
892 | static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) | 937 | static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) |
893 | { | 938 | { |
894 | int err; | 939 | int err; |
895 | struct sock *sk = data; | 940 | struct sock *sk = chan->data; |
896 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 941 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
897 | 942 | ||
898 | lock_sock(sk); | 943 | lock_sock(sk); |
@@ -925,16 +970,57 @@ done: | |||
925 | return err; | 970 | return err; |
926 | } | 971 | } |
927 | 972 | ||
928 | static void l2cap_sock_close_cb(void *data) | 973 | static void l2cap_sock_close_cb(struct l2cap_chan *chan) |
929 | { | 974 | { |
930 | struct sock *sk = data; | 975 | struct sock *sk = chan->data; |
931 | 976 | ||
932 | l2cap_sock_kill(sk); | 977 | l2cap_sock_kill(sk); |
933 | } | 978 | } |
934 | 979 | ||
935 | static void l2cap_sock_state_change_cb(void *data, int state) | 980 | static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) |
936 | { | 981 | { |
937 | struct sock *sk = data; | 982 | struct sock *sk = chan->data; |
983 | struct sock *parent; | ||
984 | |||
985 | lock_sock(sk); | ||
986 | |||
987 | parent = bt_sk(sk)->parent; | ||
988 | |||
989 | sock_set_flag(sk, SOCK_ZAPPED); | ||
990 | |||
991 | switch (chan->state) { | ||
992 | case BT_OPEN: | ||
993 | case BT_BOUND: | ||
994 | case BT_CLOSED: | ||
995 | break; | ||
996 | case BT_LISTEN: | ||
997 | l2cap_sock_cleanup_listen(sk); | ||
998 | sk->sk_state = BT_CLOSED; | ||
999 | chan->state = BT_CLOSED; | ||
1000 | |||
1001 | break; | ||
1002 | default: | ||
1003 | sk->sk_state = BT_CLOSED; | ||
1004 | chan->state = BT_CLOSED; | ||
1005 | |||
1006 | sk->sk_err = err; | ||
1007 | |||
1008 | if (parent) { | ||
1009 | bt_accept_unlink(sk); | ||
1010 | parent->sk_data_ready(parent, 0); | ||
1011 | } else { | ||
1012 | sk->sk_state_change(sk); | ||
1013 | } | ||
1014 | |||
1015 | break; | ||
1016 | } | ||
1017 | |||
1018 | release_sock(sk); | ||
1019 | } | ||
1020 | |||
1021 | static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state) | ||
1022 | { | ||
1023 | struct sock *sk = chan->data; | ||
938 | 1024 | ||
939 | sk->sk_state = state; | 1025 | sk->sk_state = state; |
940 | } | 1026 | } |
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, | |||
955 | return skb; | 1041 | return skb; |
956 | } | 1042 | } |
957 | 1043 | ||
1044 | static void l2cap_sock_ready_cb(struct l2cap_chan *chan) | ||
1045 | { | ||
1046 | struct sock *sk = chan->data; | ||
1047 | struct sock *parent; | ||
1048 | |||
1049 | lock_sock(sk); | ||
1050 | |||
1051 | parent = bt_sk(sk)->parent; | ||
1052 | |||
1053 | BT_DBG("sk %p, parent %p", sk, parent); | ||
1054 | |||
1055 | sk->sk_state = BT_CONNECTED; | ||
1056 | sk->sk_state_change(sk); | ||
1057 | |||
1058 | if (parent) | ||
1059 | parent->sk_data_ready(parent, 0); | ||
1060 | |||
1061 | release_sock(sk); | ||
1062 | } | ||
1063 | |||
958 | static struct l2cap_ops l2cap_chan_ops = { | 1064 | static struct l2cap_ops l2cap_chan_ops = { |
959 | .name = "L2CAP Socket Interface", | 1065 | .name = "L2CAP Socket Interface", |
960 | .new_connection = l2cap_sock_new_connection_cb, | 1066 | .new_connection = l2cap_sock_new_connection_cb, |
961 | .recv = l2cap_sock_recv_cb, | 1067 | .recv = l2cap_sock_recv_cb, |
962 | .close = l2cap_sock_close_cb, | 1068 | .close = l2cap_sock_close_cb, |
1069 | .teardown = l2cap_sock_teardown_cb, | ||
963 | .state_change = l2cap_sock_state_change_cb, | 1070 | .state_change = l2cap_sock_state_change_cb, |
1071 | .ready = l2cap_sock_ready_cb, | ||
964 | .alloc_skb = l2cap_sock_alloc_skb_cb, | 1072 | .alloc_skb = l2cap_sock_alloc_skb_cb, |
965 | }; | 1073 | }; |
966 | 1074 | ||
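The new l2cap_valid_mtu() helper rejects an undersized MTU in setsockopt() before it can reach the channel, with a different floor for LE data channels than for everything else. A standalone illustration of the same check; the numeric minimums below are stand-ins for this sketch, the real ones come from the L2CAP headers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* illustrative minimums; the real values live in the L2CAP headers */
#define LE_MIN_MTU     23U
#define BREDR_MIN_MTU  48U

enum chan_kind { CHAN_LE_DATA, CHAN_BREDR };

static bool valid_mtu(enum chan_kind kind, uint16_t mtu)
{
	switch (kind) {
	case CHAN_LE_DATA:
		return mtu >= LE_MIN_MTU;
	default:
		return mtu >= BREDR_MIN_MTU;
	}
}

int main(void)
{
	assert(valid_mtu(CHAN_LE_DATA, 23));
	assert(!valid_mtu(CHAN_BREDR, 23));   /* too small for BR/EDR */
	assert(valid_mtu(CHAN_BREDR, 672));
	return 0;
}

Failing early in setsockopt() keeps an invalid imtu from ever being advertised in a configuration request.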
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index 506628876f36..e1c97527e16c 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c | |||
@@ -26,12 +26,7 @@ | |||
26 | 26 | ||
27 | #define pr_fmt(fmt) "Bluetooth: " fmt | 27 | #define pr_fmt(fmt) "Bluetooth: " fmt |
28 | 28 | ||
29 | #include <linux/module.h> | 29 | #include <linux/export.h> |
30 | |||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/stddef.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <asm/errno.h> | ||
35 | 30 | ||
36 | #include <net/bluetooth/bluetooth.h> | 31 | #include <net/bluetooth/bluetooth.h> |
37 | 32 | ||
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 25d220776079..c72307cc25fc 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -24,8 +24,6 @@ | |||
24 | 24 | ||
25 | /* Bluetooth HCI Management interface */ | 25 | /* Bluetooth HCI Management interface */ |
26 | 26 | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/uaccess.h> | ||
29 | #include <linux/module.h> | 27 | #include <linux/module.h> |
30 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
31 | 29 | ||
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, | |||
714 | } | 712 | } |
715 | 713 | ||
716 | static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, | 714 | static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, |
717 | void (*cb)(struct pending_cmd *cmd, void *data), | 715 | void (*cb)(struct pending_cmd *cmd, |
716 | void *data), | ||
718 | void *data) | 717 | void *data) |
719 | { | 718 | { |
720 | struct list_head *p, *n; | 719 | struct list_head *p, *n; |
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
871 | } | 870 | } |
872 | 871 | ||
873 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || | 872 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || |
874 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { | 873 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { |
875 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, | 874 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, |
876 | MGMT_STATUS_BUSY); | 875 | MGMT_STATUS_BUSY); |
877 | goto failed; | 876 | goto failed; |
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
978 | } | 977 | } |
979 | 978 | ||
980 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || | 979 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || |
981 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { | 980 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { |
982 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, | 981 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, |
983 | MGMT_STATUS_BUSY); | 982 | MGMT_STATUS_BUSY); |
984 | goto failed; | 983 | goto failed; |
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1001 | scan = 0; | 1000 | scan = 0; |
1002 | 1001 | ||
1003 | if (test_bit(HCI_ISCAN, &hdev->flags) && | 1002 | if (test_bit(HCI_ISCAN, &hdev->flags) && |
1004 | hdev->discov_timeout > 0) | 1003 | hdev->discov_timeout > 0) |
1005 | cancel_delayed_work(&hdev->discov_off); | 1004 | cancel_delayed_work(&hdev->discov_off); |
1006 | } | 1005 | } |
1007 | 1006 | ||
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1056 | bool changed = false; | 1055 | bool changed = false; |
1057 | 1056 | ||
1058 | if (!!cp->val != test_bit(HCI_LINK_SECURITY, | 1057 | if (!!cp->val != test_bit(HCI_LINK_SECURITY, |
1059 | &hdev->dev_flags)) { | 1058 | &hdev->dev_flags)) { |
1060 | change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); | 1059 | change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); |
1061 | changed = true; | 1060 | changed = true; |
1062 | } | 1061 | } |
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev) | |||
1317 | } | 1316 | } |
1318 | 1317 | ||
1319 | static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, | 1318 | static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, |
1320 | u16 len) | 1319 | u16 len) |
1321 | { | 1320 | { |
1322 | struct mgmt_cp_remove_uuid *cp = data; | 1321 | struct mgmt_cp_remove_uuid *cp = data; |
1323 | struct pending_cmd *cmd; | 1322 | struct pending_cmd *cmd; |
@@ -1442,7 +1441,7 @@ unlock: | |||
1442 | } | 1441 | } |
1443 | 1442 | ||
1444 | static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, | 1443 | static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, |
1445 | u16 len) | 1444 | u16 len) |
1446 | { | 1445 | { |
1447 | struct mgmt_cp_load_link_keys *cp = data; | 1446 | struct mgmt_cp_load_link_keys *cp = data; |
1448 | u16 key_count, expected_len; | 1447 | u16 key_count, expected_len; |
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1454 | sizeof(struct mgmt_link_key_info); | 1453 | sizeof(struct mgmt_link_key_info); |
1455 | if (expected_len != len) { | 1454 | if (expected_len != len) { |
1456 | BT_ERR("load_link_keys: expected %u bytes, got %u bytes", | 1455 | BT_ERR("load_link_keys: expected %u bytes, got %u bytes", |
1457 | len, expected_len); | 1456 | len, expected_len); |
1458 | return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, | 1457 | return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, |
1459 | MGMT_STATUS_INVALID_PARAMS); | 1458 | MGMT_STATUS_INVALID_PARAMS); |
1460 | } | 1459 | } |
1461 | 1460 | ||
1462 | BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, | 1461 | BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, |
1463 | key_count); | 1462 | key_count); |
1464 | 1463 | ||
1465 | hci_dev_lock(hdev); | 1464 | hci_dev_lock(hdev); |
1466 | 1465 | ||
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1535 | if (cp->disconnect) { | 1534 | if (cp->disconnect) { |
1536 | if (cp->addr.type == BDADDR_BREDR) | 1535 | if (cp->addr.type == BDADDR_BREDR) |
1537 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, | 1536 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, |
1538 | &cp->addr.bdaddr); | 1537 | &cp->addr.bdaddr); |
1539 | else | 1538 | else |
1540 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, | 1539 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, |
1541 | &cp->addr.bdaddr); | 1540 | &cp->addr.bdaddr); |
1542 | } else { | 1541 | } else { |
1543 | conn = NULL; | 1542 | conn = NULL; |
1544 | } | 1543 | } |
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1594 | } | 1593 | } |
1595 | 1594 | ||
1596 | if (cp->addr.type == BDADDR_BREDR) | 1595 | if (cp->addr.type == BDADDR_BREDR) |
1597 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); | 1596 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, |
1597 | &cp->addr.bdaddr); | ||
1598 | else | 1598 | else |
1599 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); | 1599 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); |
1600 | 1600 | ||
@@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1813 | hdev->io_capability = cp->io_capability; | 1813 | hdev->io_capability = cp->io_capability; |
1814 | 1814 | ||
1815 | BT_DBG("%s IO capability set to 0x%02x", hdev->name, | 1815 | BT_DBG("%s IO capability set to 0x%02x", hdev->name, |
1816 | hdev->io_capability); | 1816 | hdev->io_capability); |
1817 | 1817 | ||
1818 | hci_dev_unlock(hdev); | 1818 | hci_dev_unlock(hdev); |
1819 | 1819 | ||
@@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1821 | 0); | 1821 | 0); |
1822 | } | 1822 | } |
1823 | 1823 | ||
1824 | static inline struct pending_cmd *find_pairing(struct hci_conn *conn) | 1824 | static struct pending_cmd *find_pairing(struct hci_conn *conn) |
1825 | { | 1825 | { |
1826 | struct hci_dev *hdev = conn->hdev; | 1826 | struct hci_dev *hdev = conn->hdev; |
1827 | struct pending_cmd *cmd; | 1827 | struct pending_cmd *cmd; |
@@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) | |||
1873 | pairing_complete(cmd, mgmt_status(status)); | 1873 | pairing_complete(cmd, mgmt_status(status)); |
1874 | } | 1874 | } |
1875 | 1875 | ||
1876 | static void le_connect_complete_cb(struct hci_conn *conn, u8 status) | ||
1877 | { | ||
1878 | struct pending_cmd *cmd; | ||
1879 | |||
1880 | BT_DBG("status %u", status); | ||
1881 | |||
1882 | if (!status) | ||
1883 | return; | ||
1884 | |||
1885 | cmd = find_pairing(conn); | ||
1886 | if (!cmd) | ||
1887 | BT_DBG("Unable to find a pending command"); | ||
1888 | else | ||
1889 | pairing_complete(cmd, mgmt_status(status)); | ||
1890 | } | ||
1891 | |||
1876 | static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, | 1892 | static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, |
1877 | u16 len) | 1893 | u16 len) |
1878 | { | 1894 | { |
@@ -1911,8 +1927,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1911 | rp.addr.type = cp->addr.type; | 1927 | rp.addr.type = cp->addr.type; |
1912 | 1928 | ||
1913 | if (IS_ERR(conn)) { | 1929 | if (IS_ERR(conn)) { |
1930 | int status; | ||
1931 | |||
1932 | if (PTR_ERR(conn) == -EBUSY) | ||
1933 | status = MGMT_STATUS_BUSY; | ||
1934 | else | ||
1935 | status = MGMT_STATUS_CONNECT_FAILED; | ||
1936 | |||
1914 | err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, | 1937 | err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, |
1915 | MGMT_STATUS_CONNECT_FAILED, &rp, | 1938 | status, &rp, |
1916 | sizeof(rp)); | 1939 | sizeof(rp)); |
1917 | goto unlock; | 1940 | goto unlock; |
1918 | } | 1941 | } |
@@ -1934,6 +1957,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1934 | /* For LE, just connecting isn't a proof that the pairing finished */ | 1957 | /* For LE, just connecting isn't a proof that the pairing finished */ |
1935 | if (cp->addr.type == BDADDR_BREDR) | 1958 | if (cp->addr.type == BDADDR_BREDR) |
1936 | conn->connect_cfm_cb = pairing_complete_cb; | 1959 | conn->connect_cfm_cb = pairing_complete_cb; |
1960 | else | ||
1961 | conn->connect_cfm_cb = le_connect_complete_cb; | ||
1937 | 1962 | ||
1938 | conn->security_cfm_cb = pairing_complete_cb; | 1963 | conn->security_cfm_cb = pairing_complete_cb; |
1939 | conn->disconn_cfm_cb = pairing_complete_cb; | 1964 | conn->disconn_cfm_cb = pairing_complete_cb; |
@@ -1941,7 +1966,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1941 | cmd->user_data = conn; | 1966 | cmd->user_data = conn; |
1942 | 1967 | ||
1943 | if (conn->state == BT_CONNECTED && | 1968 | if (conn->state == BT_CONNECTED && |
1944 | hci_conn_security(conn, sec_level, auth_type)) | 1969 | hci_conn_security(conn, sec_level, auth_type)) |
1945 | pairing_complete(cmd, 0); | 1970 | pairing_complete(cmd, 0); |
1946 | 1971 | ||
1947 | err = 0; | 1972 | err = 0; |
@@ -2238,7 +2263,7 @@ unlock: | |||
2238 | } | 2263 | } |
2239 | 2264 | ||
2240 | static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, | 2265 | static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, |
2241 | void *data, u16 len) | 2266 | void *data, u16 len) |
2242 | { | 2267 | { |
2243 | struct mgmt_cp_remove_remote_oob_data *cp = data; | 2268 | struct mgmt_cp_remove_remote_oob_data *cp = data; |
2244 | u8 status; | 2269 | u8 status; |
@@ -2407,7 +2432,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2407 | 2432 | ||
2408 | case DISCOVERY_RESOLVING: | 2433 | case DISCOVERY_RESOLVING: |
2409 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, | 2434 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, |
2410 | NAME_PENDING); | 2435 | NAME_PENDING); |
2411 | if (!e) { | 2436 | if (!e) { |
2412 | mgmt_pending_remove(cmd); | 2437 | mgmt_pending_remove(cmd); |
2413 | err = cmd_complete(sk, hdev->id, | 2438 | err = cmd_complete(sk, hdev->id, |
@@ -2629,7 +2654,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, | |||
2629 | sizeof(struct mgmt_ltk_info); | 2654 | sizeof(struct mgmt_ltk_info); |
2630 | if (expected_len != len) { | 2655 | if (expected_len != len) { |
2631 | BT_ERR("load_keys: expected %u bytes, got %u bytes", | 2656 | BT_ERR("load_keys: expected %u bytes, got %u bytes", |
2632 | len, expected_len); | 2657 | len, expected_len); |
2633 | return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, | 2658 | return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, |
2634 | EINVAL); | 2659 | EINVAL); |
2635 | } | 2660 | } |
@@ -2754,7 +2779,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
2754 | } | 2779 | } |
2755 | 2780 | ||
2756 | if (opcode >= ARRAY_SIZE(mgmt_handlers) || | 2781 | if (opcode >= ARRAY_SIZE(mgmt_handlers) || |
2757 | mgmt_handlers[opcode].func == NULL) { | 2782 | mgmt_handlers[opcode].func == NULL) { |
2758 | BT_DBG("Unknown op %u", opcode); | 2783 | BT_DBG("Unknown op %u", opcode); |
2759 | err = cmd_status(sk, index, opcode, | 2784 | err = cmd_status(sk, index, opcode, |
2760 | MGMT_STATUS_UNKNOWN_COMMAND); | 2785 | MGMT_STATUS_UNKNOWN_COMMAND); |
@@ -2762,7 +2787,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
2762 | } | 2787 | } |
2763 | 2788 | ||
2764 | if ((hdev && opcode < MGMT_OP_READ_INFO) || | 2789 | if ((hdev && opcode < MGMT_OP_READ_INFO) || |
2765 | (!hdev && opcode >= MGMT_OP_READ_INFO)) { | 2790 | (!hdev && opcode >= MGMT_OP_READ_INFO)) { |
2766 | err = cmd_status(sk, index, opcode, | 2791 | err = cmd_status(sk, index, opcode, |
2767 | MGMT_STATUS_INVALID_INDEX); | 2792 | MGMT_STATUS_INVALID_INDEX); |
2768 | goto done; | 2793 | goto done; |
@@ -2771,7 +2796,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
2771 | handler = &mgmt_handlers[opcode]; | 2796 | handler = &mgmt_handlers[opcode]; |
2772 | 2797 | ||
2773 | if ((handler->var_len && len < handler->data_len) || | 2798 | if ((handler->var_len && len < handler->data_len) || |
2774 | (!handler->var_len && len != handler->data_len)) { | 2799 | (!handler->var_len && len != handler->data_len)) { |
2775 | err = cmd_status(sk, index, opcode, | 2800 | err = cmd_status(sk, index, opcode, |
2776 | MGMT_STATUS_INVALID_PARAMS); | 2801 | MGMT_STATUS_INVALID_PARAMS); |
2777 | goto done; | 2802 | goto done; |
@@ -2955,7 +2980,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, | |||
2955 | bacpy(&ev.key.addr.bdaddr, &key->bdaddr); | 2980 | bacpy(&ev.key.addr.bdaddr, &key->bdaddr); |
2956 | ev.key.addr.type = BDADDR_BREDR; | 2981 | ev.key.addr.type = BDADDR_BREDR; |
2957 | ev.key.type = key->type; | 2982 | ev.key.type = key->type; |
2958 | memcpy(ev.key.val, key->val, 16); | 2983 | memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); |
2959 | ev.key.pin_len = key->pin_len; | 2984 | ev.key.pin_len = key->pin_len; |
2960 | 2985 | ||
2961 | return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); | 2986 | return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); |
@@ -3090,7 +3115,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3090 | mgmt_pending_remove(cmd); | 3115 | mgmt_pending_remove(cmd); |
3091 | 3116 | ||
3092 | mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, | 3117 | mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, |
3093 | hdev); | 3118 | hdev); |
3094 | return err; | 3119 | return err; |
3095 | } | 3120 | } |
3096 | 3121 | ||
@@ -3180,7 +3205,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3180 | } | 3205 | } |
3181 | 3206 | ||
3182 | int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, | 3207 | int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3183 | u8 link_type, u8 addr_type) | 3208 | u8 link_type, u8 addr_type) |
3184 | { | 3209 | { |
3185 | struct mgmt_ev_user_passkey_request ev; | 3210 | struct mgmt_ev_user_passkey_request ev; |
3186 | 3211 | ||
@@ -3194,8 +3219,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3194 | } | 3219 | } |
3195 | 3220 | ||
3196 | static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, | 3221 | static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3197 | u8 link_type, u8 addr_type, u8 status, | 3222 | u8 link_type, u8 addr_type, u8 status, |
3198 | u8 opcode) | 3223 | u8 opcode) |
3199 | { | 3224 | { |
3200 | struct pending_cmd *cmd; | 3225 | struct pending_cmd *cmd; |
3201 | struct mgmt_rp_user_confirm_reply rp; | 3226 | struct mgmt_rp_user_confirm_reply rp; |
@@ -3226,7 +3251,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3226 | u8 link_type, u8 addr_type, u8 status) | 3251 | u8 link_type, u8 addr_type, u8 status) |
3227 | { | 3252 | { |
3228 | return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, | 3253 | return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, |
3229 | status, MGMT_OP_USER_CONFIRM_NEG_REPLY); | 3254 | status, |
3255 | MGMT_OP_USER_CONFIRM_NEG_REPLY); | ||
3230 | } | 3256 | } |
3231 | 3257 | ||
3232 | int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, | 3258 | int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
@@ -3240,7 +3266,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3240 | u8 link_type, u8 addr_type, u8 status) | 3266 | u8 link_type, u8 addr_type, u8 status) |
3241 | { | 3267 | { |
3242 | return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, | 3268 | return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, |
3243 | status, MGMT_OP_USER_PASSKEY_NEG_REPLY); | 3269 | status, |
3270 | MGMT_OP_USER_PASSKEY_NEG_REPLY); | ||
3244 | } | 3271 | } |
3245 | 3272 | ||
3246 | int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, | 3273 | int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
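Several mgmt commands are a fixed header followed by a count of fixed-size records; load_link_keys() and load_long_term_keys() above both recompute the expected length from that count and reject anything that does not match exactly. A hedged sketch of the same validation with invented sizes:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* A command is a fixed header followed by 'count' fixed-size records.
 * Anything longer or shorter than that exact size is malformed. */
static bool var_len_cmd_ok(size_t len, size_t hdr_len,
			   size_t count, size_t record_len)
{
	if (len < hdr_len)                    /* header must be complete */
		return false;

	return len == hdr_len + count * record_len;
}

int main(void)
{
	/* e.g. a 4-byte header announcing two 25-byte key records */
	assert(var_len_cmd_ok(4 + 2 * 25, 4, 2, 25));
	assert(!var_len_cmd_ok(4 + 2 * 25 + 1, 4, 2, 25));  /* trailing byte */
	assert(!var_len_cmd_ok(4 + 1 * 25, 4, 2, 25));      /* record missing */
	return 0;
}

A production parser should also guard the count * record_len multiplication against integer overflow before comparing.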
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 8a602388f1e7..c75107ef8920 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -26,22 +26,8 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/errno.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/signal.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/wait.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/debugfs.h> | 29 | #include <linux/debugfs.h> |
37 | #include <linux/seq_file.h> | ||
38 | #include <linux/net.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/kthread.h> | 30 | #include <linux/kthread.h> |
41 | #include <linux/slab.h> | ||
42 | |||
43 | #include <net/sock.h> | ||
44 | #include <linux/uaccess.h> | ||
45 | #include <asm/unaligned.h> | 31 | #include <asm/unaligned.h> |
46 | 32 | ||
47 | #include <net/bluetooth/bluetooth.h> | 33 | #include <net/bluetooth/bluetooth.h> |
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s); | |||
115 | #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) | 101 | #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) |
116 | #define __get_rpn_parity(line) (((line) >> 3) & 0x7) | 102 | #define __get_rpn_parity(line) (((line) >> 3) & 0x7) |
117 | 103 | ||
118 | static inline void rfcomm_schedule(void) | 104 | static void rfcomm_schedule(void) |
119 | { | 105 | { |
120 | if (!rfcomm_thread) | 106 | if (!rfcomm_thread) |
121 | return; | 107 | return; |
122 | wake_up_process(rfcomm_thread); | 108 | wake_up_process(rfcomm_thread); |
123 | } | 109 | } |
124 | 110 | ||
125 | static inline void rfcomm_session_put(struct rfcomm_session *s) | 111 | static void rfcomm_session_put(struct rfcomm_session *s) |
126 | { | 112 | { |
127 | if (atomic_dec_and_test(&s->refcnt)) | 113 | if (atomic_dec_and_test(&s->refcnt)) |
128 | rfcomm_session_del(s); | 114 | rfcomm_session_del(s); |
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock) | |||
227 | return err; | 213 | return err; |
228 | } | 214 | } |
229 | 215 | ||
230 | static inline int rfcomm_check_security(struct rfcomm_dlc *d) | 216 | static int rfcomm_check_security(struct rfcomm_dlc *d) |
231 | { | 217 | { |
232 | struct sock *sk = d->session->sock->sk; | 218 | struct sock *sk = d->session->sock->sk; |
233 | struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; | 219 | struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; |
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s) | |||
1750 | /* Send data queued for the DLC. | 1736 | /* Send data queued for the DLC. |
1751 | * Return number of frames left in the queue. | 1737 | * Return number of frames left in the queue. |
1752 | */ | 1738 | */ |
1753 | static inline int rfcomm_process_tx(struct rfcomm_dlc *d) | 1739 | static int rfcomm_process_tx(struct rfcomm_dlc *d) |
1754 | { | 1740 | { |
1755 | struct sk_buff *skb; | 1741 | struct sk_buff *skb; |
1756 | int err; | 1742 | int err; |
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d) | |||
1798 | return skb_queue_len(&d->tx_queue); | 1784 | return skb_queue_len(&d->tx_queue); |
1799 | } | 1785 | } |
1800 | 1786 | ||
1801 | static inline void rfcomm_process_dlcs(struct rfcomm_session *s) | 1787 | static void rfcomm_process_dlcs(struct rfcomm_session *s) |
1802 | { | 1788 | { |
1803 | struct rfcomm_dlc *d; | 1789 | struct rfcomm_dlc *d; |
1804 | struct list_head *p, *n; | 1790 | struct list_head *p, *n; |
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s) | |||
1858 | } | 1844 | } |
1859 | } | 1845 | } |
1860 | 1846 | ||
1861 | static inline void rfcomm_process_rx(struct rfcomm_session *s) | 1847 | static void rfcomm_process_rx(struct rfcomm_session *s) |
1862 | { | 1848 | { |
1863 | struct socket *sock = s->sock; | 1849 | struct socket *sock = s->sock; |
1864 | struct sock *sk = sock->sk; | 1850 | struct sock *sk = sock->sk; |
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s) | |||
1883 | } | 1869 | } |
1884 | } | 1870 | } |
1885 | 1871 | ||
1886 | static inline void rfcomm_accept_connection(struct rfcomm_session *s) | 1872 | static void rfcomm_accept_connection(struct rfcomm_session *s) |
1887 | { | 1873 | { |
1888 | struct socket *sock = s->sock, *nsock; | 1874 | struct socket *sock = s->sock, *nsock; |
1889 | int err; | 1875 | int err; |
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s) | |||
1917 | sock_release(nsock); | 1903 | sock_release(nsock); |
1918 | } | 1904 | } |
1919 | 1905 | ||
1920 | static inline void rfcomm_check_connection(struct rfcomm_session *s) | 1906 | static void rfcomm_check_connection(struct rfcomm_session *s) |
1921 | { | 1907 | { |
1922 | struct sock *sk = s->sock->sk; | 1908 | struct sock *sk = s->sock->sk; |
1923 | 1909 | ||
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s) | |||
1941 | } | 1927 | } |
1942 | } | 1928 | } |
1943 | 1929 | ||
1944 | static inline void rfcomm_process_sessions(void) | 1930 | static void rfcomm_process_sessions(void) |
1945 | { | 1931 | { |
1946 | struct list_head *p, *n; | 1932 | struct list_head *p, *n; |
1947 | 1933 | ||
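Most of the rfcomm/core.c hunks simply drop 'inline' from functions the compiler can decide about itself, but rfcomm_session_put() is also a compact example of the put side of reference counting: the session is deleted only when the last reference is released. A minimal sketch of that idiom with C11 atomics; the session struct here is a stand-in, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	atomic_int refcnt;
	/* ... protocol state would live here ... */
};

static struct session *session_new(void)
{
	struct session *s = calloc(1, sizeof(*s));

	if (s)
		atomic_init(&s->refcnt, 1);   /* creator holds one reference */
	return s;
}

static void session_get(struct session *s)
{
	atomic_fetch_add(&s->refcnt, 1);
}

static void session_put(struct session *s)
{
	/* fetch_sub returns the old value: 1 means we were the last holder */
	if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
		printf("last reference dropped, freeing session\n");
		free(s);
	}
}

int main(void)
{
	struct session *s = session_new();

	if (!s)
		return 1;
	session_get(s);   /* e.g. a DLC starts using the session */
	session_put(s);
	session_put(s);   /* last put frees */
	return 0;
}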
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index e8707debb864..7e1e59645c05 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -25,27 +25,8 @@ | |||
25 | * RFCOMM sockets. | 25 | * RFCOMM sockets. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/export.h> |
29 | |||
30 | #include <linux/types.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/poll.h> | ||
36 | #include <linux/fcntl.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/socket.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <linux/list.h> | ||
42 | #include <linux/device.h> | ||
43 | #include <linux/debugfs.h> | 29 | #include <linux/debugfs.h> |
44 | #include <linux/seq_file.h> | ||
45 | #include <linux/security.h> | ||
46 | #include <net/sock.h> | ||
47 | |||
48 | #include <linux/uaccess.h> | ||
49 | 30 | ||
50 | #include <net/bluetooth/bluetooth.h> | 31 | #include <net/bluetooth/bluetooth.h> |
51 | #include <net/bluetooth/hci_core.h> | 32 | #include <net/bluetooth/hci_core.h> |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index d1820ff14aee..cb960773c002 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -31,11 +31,6 @@ | |||
31 | #include <linux/tty_driver.h> | 31 | #include <linux/tty_driver.h> |
32 | #include <linux/tty_flip.h> | 32 | #include <linux/tty_flip.h> |
33 | 33 | ||
34 | #include <linux/capability.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/workqueue.h> | ||
38 | |||
39 | #include <net/bluetooth/bluetooth.h> | 34 | #include <net/bluetooth/bluetooth.h> |
40 | #include <net/bluetooth/hci_core.h> | 35 | #include <net/bluetooth/hci_core.h> |
41 | #include <net/bluetooth/rfcomm.h> | 36 | #include <net/bluetooth/rfcomm.h> |
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id) | |||
132 | return NULL; | 127 | return NULL; |
133 | } | 128 | } |
134 | 129 | ||
135 | static inline struct rfcomm_dev *rfcomm_dev_get(int id) | 130 | static struct rfcomm_dev *rfcomm_dev_get(int id) |
136 | { | 131 | { |
137 | struct rfcomm_dev *dev; | 132 | struct rfcomm_dev *dev; |
138 | 133 | ||
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb) | |||
345 | tty_port_put(&dev->port); | 340 | tty_port_put(&dev->port); |
346 | } | 341 | } |
347 | 342 | ||
348 | static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) | 343 | static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) |
349 | { | 344 | { |
350 | tty_port_get(&dev->port); | 345 | tty_port_get(&dev->port); |
351 | atomic_add(skb->truesize, &dev->wmem_alloc); | 346 | atomic_add(skb->truesize, &dev->wmem_alloc); |
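rfcomm_set_owner_w() and rfcomm_wfree() in this file bracket the lifetime of a write buffer: handing a buffer to the device takes a port reference and charges the buffer's truesize to the device's write-memory counter, and the buffer's destructor gives both back (the kernel does this with atomic counters and tty_port_get/put). A rough single-threaded illustration of that pairing, with invented types:

#include <stdio.h>

struct dev {
	int refcnt;        /* keeps the device alive while buffers exist */
	long wmem_alloc;   /* bytes of write buffers currently in flight */
};

struct buf {
	struct dev *owner;
	long truesize;
};

static void buf_set_owner(struct buf *b, struct dev *d, long truesize)
{
	d->refcnt++;                   /* device must outlive this buffer */
	d->wmem_alloc += truesize;     /* account the memory against it */
	b->owner = d;
	b->truesize = truesize;
}

static void buf_free(struct buf *b)
{
	struct dev *d = b->owner;

	d->wmem_alloc -= b->truesize;  /* uncharge ... */
	d->refcnt--;                   /* ... and release the reference */
	b->owner = NULL;
}

int main(void)
{
	struct dev d = { 1, 0 };
	struct buf b;

	buf_set_owner(&b, &d, 512);
	printf("in flight: %ld bytes, refs %d\n", d.wmem_alloc, d.refcnt);
	buf_free(&b);
	printf("in flight: %ld bytes, refs %d\n", d.wmem_alloc, d.refcnt);
	return 0;
}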
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index cbdd313659a7..40bbe25dcff7 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -25,26 +25,8 @@ | |||
25 | /* Bluetooth SCO sockets. */ | 25 | /* Bluetooth SCO sockets. */ |
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/poll.h> | ||
35 | #include <linux/fcntl.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/skbuff.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
42 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
43 | #include <linux/list.h> | ||
44 | #include <linux/security.h> | ||
45 | #include <net/sock.h> | ||
46 | |||
47 | #include <linux/uaccess.h> | ||
48 | 30 | ||
49 | #include <net/bluetooth/bluetooth.h> | 31 | #include <net/bluetooth/bluetooth.h> |
50 | #include <net/bluetooth/hci_core.h> | 32 | #include <net/bluetooth/hci_core.h> |
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) | |||
123 | return conn; | 105 | return conn; |
124 | } | 106 | } |
125 | 107 | ||
126 | static inline struct sock *sco_chan_get(struct sco_conn *conn) | 108 | static struct sock *sco_chan_get(struct sco_conn *conn) |
127 | { | 109 | { |
128 | struct sock *sk = NULL; | 110 | struct sock *sk = NULL; |
129 | sco_conn_lock(conn); | 111 | sco_conn_lock(conn); |
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err) | |||
157 | return 0; | 139 | return 0; |
158 | } | 140 | } |
159 | 141 | ||
160 | static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) | 142 | static int sco_chan_add(struct sco_conn *conn, struct sock *sk, |
143 | struct sock *parent) | ||
161 | { | 144 | { |
162 | int err = 0; | 145 | int err = 0; |
163 | 146 | ||
@@ -228,7 +211,7 @@ done: | |||
228 | return err; | 211 | return err; |
229 | } | 212 | } |
230 | 213 | ||
231 | static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) | 214 | static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) |
232 | { | 215 | { |
233 | struct sco_conn *conn = sco_pi(sk)->conn; | 216 | struct sco_conn *conn = sco_pi(sk)->conn; |
234 | struct sk_buff *skb; | 217 | struct sk_buff *skb; |
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) | |||
254 | return len; | 237 | return len; |
255 | } | 238 | } |
256 | 239 | ||
257 | static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) | 240 | static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) |
258 | { | 241 | { |
259 | struct sock *sk = sco_chan_get(conn); | 242 | struct sock *sk = sco_chan_get(conn); |
260 | 243 | ||
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen | |||
523 | goto done; | 506 | goto done; |
524 | 507 | ||
525 | err = bt_sock_wait_state(sk, BT_CONNECTED, | 508 | err = bt_sock_wait_state(sk, BT_CONNECTED, |
526 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | 509 | sock_sndtimeo(sk, flags & O_NONBLOCK)); |
527 | 510 | ||
528 | done: | 511 | done: |
529 | release_sock(sk); | 512 | release_sock(sk); |
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how) | |||
788 | 771 | ||
789 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) | 772 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) |
790 | err = bt_sock_wait_state(sk, BT_CLOSED, | 773 | err = bt_sock_wait_state(sk, BT_CLOSED, |
791 | sk->sk_lingertime); | 774 | sk->sk_lingertime); |
792 | } | 775 | } |
793 | release_sock(sk); | 776 | release_sock(sk); |
794 | return err; | 777 | return err; |
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn) | |||
878 | bh_lock_sock(parent); | 861 | bh_lock_sock(parent); |
879 | 862 | ||
880 | sk = sco_sock_alloc(sock_net(parent), NULL, | 863 | sk = sco_sock_alloc(sock_net(parent), NULL, |
881 | BTPROTO_SCO, GFP_ATOMIC); | 864 | BTPROTO_SCO, GFP_ATOMIC); |
882 | if (!sk) { | 865 | if (!sk) { |
883 | bh_unlock_sock(parent); | 866 | bh_unlock_sock(parent); |
884 | goto done; | 867 | goto done; |
@@ -907,7 +890,7 @@ done: | |||
907 | /* ----- SCO interface with lower layer (HCI) ----- */ | 890 | /* ----- SCO interface with lower layer (HCI) ----- */ |
908 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) | 891 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) |
909 | { | 892 | { |
910 | register struct sock *sk; | 893 | struct sock *sk; |
911 | struct hlist_node *node; | 894 | struct hlist_node *node; |
912 | int lm = 0; | 895 | int lm = 0; |
913 | 896 | ||
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
920 | continue; | 903 | continue; |
921 | 904 | ||
922 | if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || | 905 | if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || |
923 | !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { | 906 | !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { |
924 | lm |= HCI_LM_ACCEPT; | 907 | lm |= HCI_LM_ACCEPT; |
925 | break; | 908 | break; |
926 | } | 909 | } |
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p) | |||
981 | 964 | ||
982 | sk_for_each(sk, node, &sco_sk_list.head) { | 965 | sk_for_each(sk, node, &sco_sk_list.head) { |
983 | seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), | 966 | seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), |
984 | batostr(&bt_sk(sk)->dst), sk->sk_state); | 967 | batostr(&bt_sk(sk)->dst), sk->sk_state); |
985 | } | 968 | } |
986 | 969 | ||
987 | read_unlock(&sco_sk_list.lock); | 970 | read_unlock(&sco_sk_list.lock); |
@@ -1044,8 +1027,8 @@ int __init sco_init(void) | |||
1044 | } | 1027 | } |
1045 | 1028 | ||
1046 | if (bt_debugfs) { | 1029 | if (bt_debugfs) { |
1047 | sco_debugfs = debugfs_create_file("sco", 0444, | 1030 | sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, |
1048 | bt_debugfs, NULL, &sco_debugfs_fops); | 1031 | NULL, &sco_debugfs_fops); |
1049 | if (!sco_debugfs) | 1032 | if (!sco_debugfs) |
1050 | BT_ERR("Failed to create SCO debug file"); | 1033 | BT_ERR("Failed to create SCO debug file"); |
1051 | } | 1034 | } |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 6fc7c4708f3e..16ef0dc85a0a 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -20,14 +20,15 @@ | |||
20 | SOFTWARE IS DISCLAIMED. | 20 | SOFTWARE IS DISCLAIMED. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/crypto.h> | ||
24 | #include <linux/scatterlist.h> | ||
25 | #include <crypto/b128ops.h> | ||
26 | |||
23 | #include <net/bluetooth/bluetooth.h> | 27 | #include <net/bluetooth/bluetooth.h> |
24 | #include <net/bluetooth/hci_core.h> | 28 | #include <net/bluetooth/hci_core.h> |
25 | #include <net/bluetooth/l2cap.h> | 29 | #include <net/bluetooth/l2cap.h> |
26 | #include <net/bluetooth/mgmt.h> | 30 | #include <net/bluetooth/mgmt.h> |
27 | #include <net/bluetooth/smp.h> | 31 | #include <net/bluetooth/smp.h> |
28 | #include <linux/crypto.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | #include <crypto/b128ops.h> | ||
31 | 32 | ||
32 | #define SMP_TIMEOUT msecs_to_jiffies(30000) | 33 | #define SMP_TIMEOUT msecs_to_jiffies(30000) |
33 | 34 | ||
@@ -648,7 +649,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
648 | 649 | ||
649 | auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; | 650 | auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; |
650 | 651 | ||
651 | ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); | 652 | ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); |
652 | if (ret) | 653 | if (ret) |
653 | return SMP_UNSPECIFIED; | 654 | return SMP_UNSPECIFIED; |
654 | 655 | ||
@@ -703,7 +704,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) | |||
703 | return 0; | 704 | return 0; |
704 | } | 705 | } |
705 | 706 | ||
706 | static u8 smp_ltk_encrypt(struct l2cap_conn *conn) | 707 | static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) |
707 | { | 708 | { |
708 | struct smp_ltk *key; | 709 | struct smp_ltk *key; |
709 | struct hci_conn *hcon = conn->hcon; | 710 | struct hci_conn *hcon = conn->hcon; |
@@ -712,6 +713,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn) | |||
712 | if (!key) | 713 | if (!key) |
713 | return 0; | 714 | return 0; |
714 | 715 | ||
716 | if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) | ||
717 | return 0; | ||
718 | |||
715 | if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) | 719 | if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) |
716 | return 1; | 720 | return 1; |
717 | 721 | ||
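The added check refuses to reuse a stored long term key when the requested security level is above medium but the key was created without authentication (no MITM protection). A minimal user-space sketch of that gate follows; the SEC_* constants and the ltk struct are stand-ins, not the kernel's BT_SECURITY_* values or struct smp_ltk.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in security levels, loosely mirroring the BT_SECURITY_* ordering. */
enum { SEC_LOW = 1, SEC_MEDIUM = 2, SEC_HIGH = 3 };

struct ltk {
	bool authenticated;	/* key was generated with MITM protection */
};

/* Returns true when the stored key may be used to start encryption. */
static bool ltk_usable(const struct ltk *key, int sec_level)
{
	/* An unauthenticated key cannot satisfy a request above medium. */
	if (sec_level > SEC_MEDIUM && !key->authenticated)
		return false;
	return true;
}

int main(void)
{
	struct ltk unauth = { .authenticated = false };
	struct ltk auth   = { .authenticated = true };

	printf("high w/ unauthenticated key: %d\n", ltk_usable(&unauth, SEC_HIGH));   /* 0 */
	printf("high w/ authenticated key:   %d\n", ltk_usable(&auth, SEC_HIGH));     /* 1 */
	printf("medium w/ unauthenticated:   %d\n", ltk_usable(&unauth, SEC_MEDIUM)); /* 1 */
	return 0;
}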
@@ -732,7 +736,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
732 | 736 | ||
733 | hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); | 737 | hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); |
734 | 738 | ||
735 | if (smp_ltk_encrypt(conn)) | 739 | if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) |
736 | return 0; | 740 | return 0; |
737 | 741 | ||
738 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) | 742 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) |
@@ -771,7 +775,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) | |||
771 | return 1; | 775 | return 1; |
772 | 776 | ||
773 | if (hcon->link_mode & HCI_LM_MASTER) | 777 | if (hcon->link_mode & HCI_LM_MASTER) |
774 | if (smp_ltk_encrypt(conn)) | 778 | if (smp_ltk_encrypt(conn, sec_level)) |
775 | goto done; | 779 | goto done; |
776 | 780 | ||
777 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) | 781 | if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7722a7336a58..c2a2dcbfdf01 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2111,6 +2111,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2111 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2111 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2112 | int i, ret; | 2112 | int i, ret; |
2113 | 2113 | ||
2114 | if (!ieee80211_sdata_running(sdata)) | ||
2115 | return -ENETDOWN; | ||
2116 | |||
2114 | if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { | 2117 | if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { |
2115 | ret = drv_set_bitrate_mask(local, sdata, mask); | 2118 | ret = drv_set_bitrate_mask(local, sdata, mask); |
2116 | if (ret) | 2119 | if (ret) |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e11cd0e033ef..f1a80da4e56a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1334,6 +1334,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1334 | if (WARN_ON(!ifmgd->associated)) | 1334 | if (WARN_ON(!ifmgd->associated)) |
1335 | return; | 1335 | return; |
1336 | 1336 | ||
1337 | ieee80211_stop_poll(sdata); | ||
1338 | |||
1337 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); | 1339 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); |
1338 | 1340 | ||
1339 | ifmgd->associated = NULL; | 1341 | ifmgd->associated = NULL; |
@@ -2588,8 +2590,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, | |||
2588 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2590 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2589 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; | 2591 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; |
2590 | 2592 | ||
2591 | ieee80211_stop_poll(sdata); | ||
2592 | |||
2593 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, | 2593 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, |
2594 | false, frame_buf); | 2594 | false, frame_buf); |
2595 | mutex_unlock(&ifmgd->mtx); | 2595 | mutex_unlock(&ifmgd->mtx); |
@@ -3080,7 +3080,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
3080 | } | 3080 | } |
3081 | 3081 | ||
3082 | local->oper_channel = cbss->channel; | 3082 | local->oper_channel = cbss->channel; |
3083 | ieee80211_hw_config(local, 0); | 3083 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); |
3084 | 3084 | ||
3085 | if (sta) { | 3085 | if (sta) { |
3086 | u32 rates = 0, basic_rates = 0; | 3086 | u32 rates = 0, basic_rates = 0; |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 3bb24a121c95..a470e1123a55 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme { | |||
271 | * @plink_timer: peer link watch timer | 271 | * @plink_timer: peer link watch timer |
272 | * @plink_timer_was_running: used by suspend/resume to restore timers | 272 | * @plink_timer_was_running: used by suspend/resume to restore timers |
273 | * @t_offset: timing offset relative to this host | 273 | * @t_offset: timing offset relative to this host |
274 | * @t_offset_setpoint: reference timing offset of this sta to be used when | ||
275 | * calculating clockdrift | ||
276 | * @ch_type: peer's channel type | ||
274 | * @debugfs: debug filesystem info | 277 | * @debugfs: debug filesystem info |
275 | * @dead: set to true when sta is unlinked | 278 | * @dead: set to true when sta is unlinked |
276 | * @uploaded: set to true when sta is uploaded to the driver | 279 | * @uploaded: set to true when sta is uploaded to the driver |
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme { | |||
278 | * @sta: station information we share with the driver | 281 | * @sta: station information we share with the driver |
279 | * @sta_state: duplicates information about station state (for debug) | 282 | * @sta_state: duplicates information about station state (for debug) |
280 | * @beacon_loss_count: number of times beacon loss has triggered | 283 | * @beacon_loss_count: number of times beacon loss has triggered |
284 | * @supports_40mhz: tracks whether the station advertised 40 MHz support | ||
285 | * as we overwrite its HT parameters with the currently used value | ||
281 | */ | 286 | */ |
282 | struct sta_info { | 287 | struct sta_info { |
283 | /* General information, mostly static */ | 288 | /* General information, mostly static */ |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 9f6ce011d35d..4177bb5104b9 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
@@ -121,14 +121,14 @@ error: | |||
121 | * The device remains polling for targets until a target is found or | 121 | * The device remains polling for targets until a target is found or |
122 | * the nfc_stop_poll function is called. | 122 | * the nfc_stop_poll function is called. |
123 | */ | 123 | */ |
124 | int nfc_start_poll(struct nfc_dev *dev, u32 protocols) | 124 | int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) |
125 | { | 125 | { |
126 | int rc; | 126 | int rc; |
127 | 127 | ||
128 | pr_debug("dev_name=%s protocols=0x%x\n", | 128 | pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n", |
129 | dev_name(&dev->dev), protocols); | 129 | dev_name(&dev->dev), im_protocols, tm_protocols); |
130 | 130 | ||
131 | if (!protocols) | 131 | if (!im_protocols && !tm_protocols) |
132 | return -EINVAL; | 132 | return -EINVAL; |
133 | 133 | ||
134 | device_lock(&dev->dev); | 134 | device_lock(&dev->dev); |
@@ -143,9 +143,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols) | |||
143 | goto error; | 143 | goto error; |
144 | } | 144 | } |
145 | 145 | ||
146 | rc = dev->ops->start_poll(dev, protocols); | 146 | rc = dev->ops->start_poll(dev, im_protocols, tm_protocols); |
147 | if (!rc) | 147 | if (!rc) { |
148 | dev->polling = true; | 148 | dev->polling = true; |
149 | dev->rf_mode = NFC_RF_NONE; | ||
150 | } | ||
149 | 151 | ||
150 | error: | 152 | error: |
151 | device_unlock(&dev->dev); | 153 | device_unlock(&dev->dev); |
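With the split into initiator and target protocol masks, polling is rejected only when both masks are empty, and the RF role stays undecided until an activation arrives. The sketch below models just that validation in user space; struct poll_state, RF_NONE and the error value are stand-ins for the kernel's nfc_dev fields and -EINVAL.

#include <stdio.h>

enum rf_mode { RF_NONE, RF_INITIATOR, RF_TARGET };

struct poll_state {
	int polling;
	enum rf_mode rf_mode;
};

#define EINVAL_ERR (-22)	/* stand-in for -EINVAL */

static int start_poll(struct poll_state *dev, unsigned im_protocols,
		      unsigned tm_protocols)
{
	/* Reject only when *both* masks are empty; either side alone is fine. */
	if (!im_protocols && !tm_protocols)
		return EINVAL_ERR;

	dev->polling = 1;
	dev->rf_mode = RF_NONE;	/* no role decided until activation */
	return 0;
}

int main(void)
{
	struct poll_state dev = { 0, RF_NONE };

	printf("%d\n", start_poll(&dev, 0, 0));		/* -22: nothing to poll for */
	printf("%d\n", start_poll(&dev, 0, 0x10));	/* 0: target-only polling */
	printf("%d\n", start_poll(&dev, 0x0f, 0x10));	/* 0: both roles requested */
	return 0;
}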
@@ -235,8 +237,10 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) | |||
235 | } | 237 | } |
236 | 238 | ||
237 | rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); | 239 | rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); |
238 | if (!rc) | 240 | if (!rc) { |
239 | dev->active_target = target; | 241 | dev->active_target = target; |
242 | dev->rf_mode = NFC_RF_INITIATOR; | ||
243 | } | ||
240 | 244 | ||
241 | error: | 245 | error: |
242 | device_unlock(&dev->dev); | 246 | device_unlock(&dev->dev); |
@@ -264,11 +268,6 @@ int nfc_dep_link_down(struct nfc_dev *dev) | |||
264 | goto error; | 268 | goto error; |
265 | } | 269 | } |
266 | 270 | ||
267 | if (dev->dep_rf_mode == NFC_RF_TARGET) { | ||
268 | rc = -EOPNOTSUPP; | ||
269 | goto error; | ||
270 | } | ||
271 | |||
272 | rc = dev->ops->dep_link_down(dev); | 271 | rc = dev->ops->dep_link_down(dev); |
273 | if (!rc) { | 272 | if (!rc) { |
274 | dev->dep_link_up = false; | 273 | dev->dep_link_up = false; |
@@ -286,7 +285,6 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, | |||
286 | u8 comm_mode, u8 rf_mode) | 285 | u8 comm_mode, u8 rf_mode) |
287 | { | 286 | { |
288 | dev->dep_link_up = true; | 287 | dev->dep_link_up = true; |
289 | dev->dep_rf_mode = rf_mode; | ||
290 | 288 | ||
291 | nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); | 289 | nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); |
292 | 290 | ||
@@ -330,6 +328,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) | |||
330 | rc = dev->ops->activate_target(dev, target, protocol); | 328 | rc = dev->ops->activate_target(dev, target, protocol); |
331 | if (!rc) { | 329 | if (!rc) { |
332 | dev->active_target = target; | 330 | dev->active_target = target; |
331 | dev->rf_mode = NFC_RF_INITIATOR; | ||
333 | 332 | ||
334 | if (dev->ops->check_presence) | 333 | if (dev->ops->check_presence) |
335 | mod_timer(&dev->check_pres_timer, jiffies + | 334 | mod_timer(&dev->check_pres_timer, jiffies + |
@@ -409,27 +408,30 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, | |||
409 | goto error; | 408 | goto error; |
410 | } | 409 | } |
411 | 410 | ||
412 | if (dev->active_target == NULL) { | 411 | if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) { |
413 | rc = -ENOTCONN; | 412 | if (dev->active_target->idx != target_idx) { |
414 | kfree_skb(skb); | 413 | rc = -EADDRNOTAVAIL; |
415 | goto error; | 414 | kfree_skb(skb); |
416 | } | 415 | goto error; |
416 | } | ||
417 | 417 | ||
418 | if (dev->active_target->idx != target_idx) { | 418 | if (dev->ops->check_presence) |
419 | rc = -EADDRNOTAVAIL; | 419 | del_timer_sync(&dev->check_pres_timer); |
420 | |||
421 | rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, | ||
422 | cb_context); | ||
423 | |||
424 | if (!rc && dev->ops->check_presence) | ||
425 | mod_timer(&dev->check_pres_timer, jiffies + | ||
426 | msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); | ||
427 | } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) { | ||
428 | rc = dev->ops->tm_send(dev, skb); | ||
429 | } else { | ||
430 | rc = -ENOTCONN; | ||
420 | kfree_skb(skb); | 431 | kfree_skb(skb); |
421 | goto error; | 432 | goto error; |
422 | } | 433 | } |
423 | 434 | ||
424 | if (dev->ops->check_presence) | ||
425 | del_timer_sync(&dev->check_pres_timer); | ||
426 | |||
427 | rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb, | ||
428 | cb_context); | ||
429 | |||
430 | if (!rc && dev->ops->check_presence) | ||
431 | mod_timer(&dev->check_pres_timer, jiffies + | ||
432 | msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); | ||
433 | 435 | ||
434 | error: | 436 | error: |
435 | device_unlock(&dev->dev); | 437 | device_unlock(&dev->dev); |
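The reworked data path branches on which RF role the device ended up in: initiator traffic goes through the renamed im_transceive op toward the active target, target-mode traffic goes through tm_send, and anything else fails as not connected. A simplified user-space model of that dispatch, with hypothetical struct names and fake ops in place of the real driver callbacks:

#include <stdio.h>

enum rf_mode { RF_NONE, RF_INITIATOR, RF_TARGET };

#define ENOTCONN_ERR (-107)	/* stand-in for -ENOTCONN */

struct dev;

struct ops {
	int (*im_transceive)(struct dev *dev, const char *buf);
	int (*tm_send)(struct dev *dev, const char *buf);
};

struct dev {
	enum rf_mode rf_mode;
	int have_active_target;
	const struct ops *ops;
};

static int data_exchange(struct dev *dev, const char *buf)
{
	if (dev->rf_mode == RF_INITIATOR && dev->have_active_target)
		return dev->ops->im_transceive(dev, buf);
	if (dev->rf_mode == RF_TARGET && dev->ops->tm_send)
		return dev->ops->tm_send(dev, buf);
	return ENOTCONN_ERR;	/* neither role is usable right now */
}

static int fake_im(struct dev *dev, const char *buf)
{
	(void)dev;
	printf("initiator tx: %s\n", buf);
	return 0;
}

static int fake_tm(struct dev *dev, const char *buf)
{
	(void)dev;
	printf("target tx: %s\n", buf);
	return 0;
}

int main(void)
{
	const struct ops ops = { fake_im, fake_tm };
	struct dev dev = { RF_INITIATOR, 1, &ops };

	data_exchange(&dev, "hello");		/* initiator path */
	dev.rf_mode = RF_TARGET;
	data_exchange(&dev, "world");		/* target path */
	dev.rf_mode = RF_NONE;
	printf("%d\n", data_exchange(&dev, "x"));	/* -107 */
	return 0;
}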
@@ -447,6 +449,63 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) | |||
447 | } | 449 | } |
448 | EXPORT_SYMBOL(nfc_set_remote_general_bytes); | 450 | EXPORT_SYMBOL(nfc_set_remote_general_bytes); |
449 | 451 | ||
452 | u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len) | ||
453 | { | ||
454 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); | ||
455 | |||
456 | return nfc_llcp_general_bytes(dev, gb_len); | ||
457 | } | ||
458 | EXPORT_SYMBOL(nfc_get_local_general_bytes); | ||
459 | |||
460 | int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb) | ||
461 | { | ||
462 | /* Only LLCP target mode for now */ | ||
463 | if (dev->dep_link_up == false) { | ||
464 | kfree_skb(skb); | ||
465 | return -ENOLINK; | ||
466 | } | ||
467 | |||
468 | return nfc_llcp_data_received(dev, skb); | ||
469 | } | ||
470 | EXPORT_SYMBOL(nfc_tm_data_received); | ||
471 | |||
472 | int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode, | ||
473 | u8 *gb, size_t gb_len) | ||
474 | { | ||
475 | int rc; | ||
476 | |||
477 | device_lock(&dev->dev); | ||
478 | |||
479 | dev->polling = false; | ||
480 | |||
481 | if (gb != NULL) { | ||
482 | rc = nfc_set_remote_general_bytes(dev, gb, gb_len); | ||
483 | if (rc < 0) | ||
484 | goto out; | ||
485 | } | ||
486 | |||
487 | dev->rf_mode = NFC_RF_TARGET; | ||
488 | |||
489 | if (protocol == NFC_PROTO_NFC_DEP_MASK) | ||
490 | nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET); | ||
491 | |||
492 | rc = nfc_genl_tm_activated(dev, protocol); | ||
493 | |||
494 | out: | ||
495 | device_unlock(&dev->dev); | ||
496 | |||
497 | return rc; | ||
498 | } | ||
499 | EXPORT_SYMBOL(nfc_tm_activated); | ||
500 | |||
501 | int nfc_tm_deactivated(struct nfc_dev *dev) | ||
502 | { | ||
503 | dev->dep_link_up = false; | ||
504 | |||
505 | return nfc_genl_tm_deactivated(dev); | ||
506 | } | ||
507 | EXPORT_SYMBOL(nfc_tm_deactivated); | ||
508 | |||
450 | /** | 509 | /** |
451 | * nfc_alloc_send_skb - allocate a skb for data exchange responses | 510 | * nfc_alloc_send_skb - allocate a skb for data exchange responses |
452 | * | 511 | * |
@@ -678,7 +737,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | |||
678 | struct nfc_dev *dev; | 737 | struct nfc_dev *dev; |
679 | 738 | ||
680 | if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || | 739 | if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || |
681 | !ops->deactivate_target || !ops->data_exchange) | 740 | !ops->deactivate_target || !ops->im_transceive) |
682 | return NULL; | 741 | return NULL; |
683 | 742 | ||
684 | if (!supported_protocols) | 743 | if (!supported_protocols) |
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index e1a640d2b588..a8b0b71e8f86 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
@@ -481,12 +481,13 @@ static int hci_dev_down(struct nfc_dev *nfc_dev) | |||
481 | return 0; | 481 | return 0; |
482 | } | 482 | } |
483 | 483 | ||
484 | static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) | 484 | static int hci_start_poll(struct nfc_dev *nfc_dev, |
485 | u32 im_protocols, u32 tm_protocols) | ||
485 | { | 486 | { |
486 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 487 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
487 | 488 | ||
488 | if (hdev->ops->start_poll) | 489 | if (hdev->ops->start_poll) |
489 | return hdev->ops->start_poll(hdev, protocols); | 490 | return hdev->ops->start_poll(hdev, im_protocols, tm_protocols); |
490 | else | 491 | else |
491 | return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, | 492 | return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, |
492 | NFC_HCI_EVT_READER_REQUESTED, NULL, 0); | 493 | NFC_HCI_EVT_READER_REQUESTED, NULL, 0); |
@@ -511,9 +512,9 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev, | |||
511 | { | 512 | { |
512 | } | 513 | } |
513 | 514 | ||
514 | static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, | 515 | static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, |
515 | struct sk_buff *skb, data_exchange_cb_t cb, | 516 | struct sk_buff *skb, data_exchange_cb_t cb, |
516 | void *cb_context) | 517 | void *cb_context) |
517 | { | 518 | { |
518 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 519 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
519 | int r; | 520 | int r; |
@@ -579,7 +580,7 @@ static struct nfc_ops hci_nfc_ops = { | |||
579 | .stop_poll = hci_stop_poll, | 580 | .stop_poll = hci_stop_poll, |
580 | .activate_target = hci_activate_target, | 581 | .activate_target = hci_activate_target, |
581 | .deactivate_target = hci_deactivate_target, | 582 | .deactivate_target = hci_deactivate_target, |
582 | .data_exchange = hci_data_exchange, | 583 | .im_transceive = hci_transceive, |
583 | .check_presence = hci_check_presence, | 584 | .check_presence = hci_check_presence, |
584 | }; | 585 | }; |
585 | 586 | ||
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c index 5665dc6d893a..6b836e6242b7 100644 --- a/net/nfc/hci/shdlc.c +++ b/net/nfc/hci/shdlc.c | |||
@@ -765,14 +765,16 @@ static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) | |||
765 | return 0; | 765 | return 0; |
766 | } | 766 | } |
767 | 767 | ||
768 | static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols) | 768 | static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, |
769 | u32 im_protocols, u32 tm_protocols) | ||
769 | { | 770 | { |
770 | struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); | 771 | struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); |
771 | 772 | ||
772 | pr_debug("\n"); | 773 | pr_debug("\n"); |
773 | 774 | ||
774 | if (shdlc->ops->start_poll) | 775 | if (shdlc->ops->start_poll) |
775 | return shdlc->ops->start_poll(shdlc, protocols); | 776 | return shdlc->ops->start_poll(shdlc, |
777 | im_protocols, tm_protocols); | ||
776 | 778 | ||
777 | return 0; | 779 | return 0; |
778 | } | 780 | } |
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index bf8ae4f0b90c..b982b5b890d7 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c | |||
@@ -51,7 +51,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type) | |||
51 | return tlv[2]; | 51 | return tlv[2]; |
52 | } | 52 | } |
53 | 53 | ||
54 | static u8 llcp_tlv16(u8 *tlv, u8 type) | 54 | static u16 llcp_tlv16(u8 *tlv, u8 type) |
55 | { | 55 | { |
56 | if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) | 56 | if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) |
57 | return 0; | 57 | return 0; |
@@ -67,7 +67,7 @@ static u8 llcp_tlv_version(u8 *tlv) | |||
67 | 67 | ||
68 | static u16 llcp_tlv_miux(u8 *tlv) | 68 | static u16 llcp_tlv_miux(u8 *tlv) |
69 | { | 69 | { |
70 | return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f; | 70 | return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff; |
71 | } | 71 | } |
72 | 72 | ||
73 | static u16 llcp_tlv_wks(u8 *tlv) | 73 | static u16 llcp_tlv_wks(u8 *tlv) |
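MIUX is an 11-bit field carried in a 16-bit TLV value, and the negotiated MIU is 128 plus that field, so the accessor has to return u16 and mask with 0x7ff; the old u8 return and 0x7f mask silently discarded the peer's advertised size. A standalone illustration of the difference (the helper names here are local to the example):

#include <stdio.h>
#include <stdint.h>

/* A MIUX TLV is: type (0x02), length (2), then a 16-bit big-endian value
 * whose low 11 bits carry MIUX; the negotiated MIU is 128 + MIUX. */
static uint16_t tlv_be16(const uint8_t *tlv)
{
	return (uint16_t)((tlv[2] << 8) | tlv[3]);
}

static uint16_t tlv_miux(const uint8_t *tlv)
{
	return tlv_be16(tlv) & 0x7ff;	/* 11 bits, not 7 */
}

int main(void)
{
	/* Peer advertising MIUX = 0x480 (1152), i.e. MIU = 1280. */
	const uint8_t tlv[] = { 0x02, 0x02, 0x04, 0x80 };

	uint16_t miu_ok  = 128 + tlv_miux(tlv);
	uint16_t miu_bad = 128 + (uint8_t)(tlv_be16(tlv) & 0x7f);	/* old logic */

	printf("MIU with 0x7ff mask: %u\n", miu_ok);	/* 1280 */
	printf("MIU with 0x7f mask:  %u\n", miu_bad);	/* 128, advertisement lost */
	return 0;
}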
@@ -117,8 +117,8 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length) | |||
117 | return tlv; | 117 | return tlv; |
118 | } | 118 | } |
119 | 119 | ||
120 | int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, | 120 | int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local, |
121 | u8 *tlv_array, u16 tlv_array_len) | 121 | u8 *tlv_array, u16 tlv_array_len) |
122 | { | 122 | { |
123 | u8 *tlv = tlv_array, type, length, offset = 0; | 123 | u8 *tlv = tlv_array, type, length, offset = 0; |
124 | 124 | ||
@@ -149,8 +149,45 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, | |||
149 | case LLCP_TLV_OPT: | 149 | case LLCP_TLV_OPT: |
150 | local->remote_opt = llcp_tlv_opt(tlv); | 150 | local->remote_opt = llcp_tlv_opt(tlv); |
151 | break; | 151 | break; |
152 | default: | ||
153 | pr_err("Invalid gt tlv value 0x%x\n", type); | ||
154 | break; | ||
155 | } | ||
156 | |||
157 | offset += length + 2; | ||
158 | tlv += length + 2; | ||
159 | } | ||
160 | |||
161 | pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n", | ||
162 | local->remote_version, local->remote_miu, | ||
163 | local->remote_lto, local->remote_opt, | ||
164 | local->remote_wks); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock, | ||
170 | u8 *tlv_array, u16 tlv_array_len) | ||
171 | { | ||
172 | u8 *tlv = tlv_array, type, length, offset = 0; | ||
173 | |||
174 | pr_debug("TLV array length %d\n", tlv_array_len); | ||
175 | |||
176 | if (sock == NULL) | ||
177 | return -ENOTCONN; | ||
178 | |||
179 | while (offset < tlv_array_len) { | ||
180 | type = tlv[0]; | ||
181 | length = tlv[1]; | ||
182 | |||
183 | pr_debug("type 0x%x length %d\n", type, length); | ||
184 | |||
185 | switch (type) { | ||
186 | case LLCP_TLV_MIUX: | ||
187 | sock->miu = llcp_tlv_miux(tlv) + 128; | ||
188 | break; | ||
152 | case LLCP_TLV_RW: | 189 | case LLCP_TLV_RW: |
153 | local->remote_rw = llcp_tlv_rw(tlv); | 190 | sock->rw = llcp_tlv_rw(tlv); |
154 | break; | 191 | break; |
155 | case LLCP_TLV_SN: | 192 | case LLCP_TLV_SN: |
156 | break; | 193 | break; |
@@ -163,10 +200,7 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, | |||
163 | tlv += length + 2; | 200 | tlv += length + 2; |
164 | } | 201 | } |
165 | 202 | ||
166 | pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", | 203 | pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu); |
167 | local->remote_version, local->remote_miu, | ||
168 | local->remote_lto, local->remote_opt, | ||
169 | local->remote_wks, local->remote_rw); | ||
170 | 204 | ||
171 | return 0; | 205 | return 0; |
172 | } | 206 | } |
@@ -474,7 +508,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, | |||
474 | 508 | ||
475 | while (remaining_len > 0) { | 509 | while (remaining_len > 0) { |
476 | 510 | ||
477 | frag_len = min_t(size_t, local->remote_miu, remaining_len); | 511 | frag_len = min_t(size_t, sock->miu, remaining_len); |
478 | 512 | ||
479 | pr_debug("Fragment %zd bytes remaining %zd", | 513 | pr_debug("Fragment %zd bytes remaining %zd", |
480 | frag_len, remaining_len); | 514 | frag_len, remaining_len); |
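I-frame payloads are now fragmented against the MIU negotiated on that particular connection (sock->miu) instead of the link-wide remote_miu. A small sketch of the fragmentation loop, with plain sizes standing in for sk_buff handling:

#include <stdio.h>
#include <stddef.h>

/* Split len bytes into fragments no larger than the connection's MIU. */
static void fragment(size_t len, size_t miu)
{
	size_t remaining = len;

	while (remaining > 0) {
		size_t frag = remaining < miu ? remaining : miu;	/* min(miu, remaining) */
		printf("fragment of %zu bytes, %zu remaining\n", frag, remaining - frag);
		remaining -= frag;
	}
}

int main(void)
{
	fragment(300, 128);	/* default MIU: fragments of 128, 128, 44 */
	fragment(300, 2175);	/* large negotiated MIU: a single fragment */
	return 0;
}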
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 42994fac26d6..5d503eeb15a1 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -31,47 +31,41 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; | |||
31 | 31 | ||
32 | static struct list_head llcp_devices; | 32 | static struct list_head llcp_devices; |
33 | 33 | ||
34 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local) | 34 | void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk) |
35 | { | 35 | { |
36 | struct nfc_llcp_sock *parent, *s, *n; | 36 | write_lock(&l->lock); |
37 | struct sock *sk, *parent_sk; | 37 | sk_add_node(sk, &l->head); |
38 | int i; | 38 | write_unlock(&l->lock); |
39 | 39 | } | |
40 | mutex_lock(&local->socket_lock); | ||
41 | |||
42 | for (i = 0; i < LLCP_MAX_SAP; i++) { | ||
43 | parent = local->sockets[i]; | ||
44 | if (parent == NULL) | ||
45 | continue; | ||
46 | |||
47 | /* Release all child sockets */ | ||
48 | list_for_each_entry_safe(s, n, &parent->list, list) { | ||
49 | list_del_init(&s->list); | ||
50 | sk = &s->sk; | ||
51 | |||
52 | lock_sock(sk); | ||
53 | |||
54 | if (sk->sk_state == LLCP_CONNECTED) | ||
55 | nfc_put_device(s->dev); | ||
56 | 40 | ||
57 | sk->sk_state = LLCP_CLOSED; | 41 | void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) |
42 | { | ||
43 | write_lock(&l->lock); | ||
44 | sk_del_node_init(sk); | ||
45 | write_unlock(&l->lock); | ||
46 | } | ||
58 | 47 | ||
59 | release_sock(sk); | 48 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local) |
49 | { | ||
50 | struct sock *sk; | ||
51 | struct hlist_node *node, *tmp; | ||
52 | struct nfc_llcp_sock *llcp_sock; | ||
60 | 53 | ||
61 | sock_orphan(sk); | 54 | write_lock(&local->sockets.lock); |
62 | 55 | ||
63 | s->local = NULL; | 56 | sk_for_each_safe(sk, node, tmp, &local->sockets.head) { |
64 | } | 57 | llcp_sock = nfc_llcp_sock(sk); |
65 | 58 | ||
66 | parent_sk = &parent->sk; | 59 | lock_sock(sk); |
67 | 60 | ||
68 | lock_sock(parent_sk); | 61 | if (sk->sk_state == LLCP_CONNECTED) |
62 | nfc_put_device(llcp_sock->dev); | ||
69 | 63 | ||
70 | if (parent_sk->sk_state == LLCP_LISTEN) { | 64 | if (sk->sk_state == LLCP_LISTEN) { |
71 | struct nfc_llcp_sock *lsk, *n; | 65 | struct nfc_llcp_sock *lsk, *n; |
72 | struct sock *accept_sk; | 66 | struct sock *accept_sk; |
73 | 67 | ||
74 | list_for_each_entry_safe(lsk, n, &parent->accept_queue, | 68 | list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, |
75 | accept_queue) { | 69 | accept_queue) { |
76 | accept_sk = &lsk->sk; | 70 | accept_sk = &lsk->sk; |
77 | lock_sock(accept_sk); | 71 | lock_sock(accept_sk); |
@@ -83,24 +77,53 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local) | |||
83 | release_sock(accept_sk); | 77 | release_sock(accept_sk); |
84 | 78 | ||
85 | sock_orphan(accept_sk); | 79 | sock_orphan(accept_sk); |
86 | |||
87 | lsk->local = NULL; | ||
88 | } | 80 | } |
89 | } | 81 | } |
90 | 82 | ||
91 | if (parent_sk->sk_state == LLCP_CONNECTED) | 83 | sk->sk_state = LLCP_CLOSED; |
92 | nfc_put_device(parent->dev); | ||
93 | |||
94 | parent_sk->sk_state = LLCP_CLOSED; | ||
95 | 84 | ||
96 | release_sock(parent_sk); | 85 | release_sock(sk); |
97 | 86 | ||
98 | sock_orphan(parent_sk); | 87 | sock_orphan(sk); |
99 | 88 | ||
100 | parent->local = NULL; | 89 | sk_del_node_init(sk); |
101 | } | 90 | } |
102 | 91 | ||
103 | mutex_unlock(&local->socket_lock); | 92 | write_unlock(&local->sockets.lock); |
93 | } | ||
94 | |||
95 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | ||
96 | { | ||
97 | kref_get(&local->ref); | ||
98 | |||
99 | return local; | ||
100 | } | ||
101 | |||
102 | static void local_release(struct kref *ref) | ||
103 | { | ||
104 | struct nfc_llcp_local *local; | ||
105 | |||
106 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
107 | |||
108 | list_del(&local->list); | ||
109 | nfc_llcp_socket_release(local); | ||
110 | del_timer_sync(&local->link_timer); | ||
111 | skb_queue_purge(&local->tx_queue); | ||
112 | destroy_workqueue(local->tx_wq); | ||
113 | destroy_workqueue(local->rx_wq); | ||
114 | destroy_workqueue(local->timeout_wq); | ||
115 | kfree_skb(local->rx_pending); | ||
116 | kfree(local); | ||
117 | } | ||
118 | |||
119 | int nfc_llcp_local_put(struct nfc_llcp_local *local) | ||
120 | { | ||
121 | WARN_ON(local == NULL); | ||
122 | |||
123 | if (local == NULL) | ||
124 | return 0; | ||
125 | |||
126 | return kref_put(&local->ref, local_release); | ||
104 | } | 127 | } |
105 | 128 | ||
106 | static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local) | 129 | static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local) |
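The LLCP local structure now lives behind a reference count: every socket that stores a pointer takes a reference, and the final put runs the teardown that used to happen directly in device unregistration. A minimal user-space model of that lifecycle; the kernel code uses struct kref, this sketch hand-rolls the counter and uses a hypothetical release callback.

#include <stdio.h>
#include <stdlib.h>

struct llcp_local {
	int ref;	/* the kernel uses struct kref here */
	void (*release)(struct llcp_local *local);
};

static struct llcp_local *local_get(struct llcp_local *local)
{
	local->ref++;
	return local;
}

static int local_put(struct llcp_local *local)
{
	if (--local->ref == 0) {	/* last reference: run the destructor */
		local->release(local);
		return 1;
	}
	return 0;
}

static void local_release(struct llcp_local *local)
{
	printf("releasing local\n");
	free(local);
}

int main(void)
{
	struct llcp_local *local = calloc(1, sizeof(*local));

	if (!local)
		return 1;

	local->ref = 1;			/* device registration holds a reference */
	local->release = local_release;

	local_get(local);		/* e.g. a bound socket takes its own ref */
	local_put(local);		/* socket released: count back to one */
	local_put(local);		/* device unregistered: destructor runs */
	return 0;
}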
@@ -384,31 +407,9 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) | |||
384 | return -EINVAL; | 407 | return -EINVAL; |
385 | } | 408 | } |
386 | 409 | ||
387 | return nfc_llcp_parse_tlv(local, | 410 | return nfc_llcp_parse_gb_tlv(local, |
388 | &local->remote_gb[3], | 411 | &local->remote_gb[3], |
389 | local->remote_gb_len - 3); | 412 | local->remote_gb_len - 3); |
390 | } | ||
391 | |||
392 | static void nfc_llcp_tx_work(struct work_struct *work) | ||
393 | { | ||
394 | struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, | ||
395 | tx_work); | ||
396 | struct sk_buff *skb; | ||
397 | |||
398 | skb = skb_dequeue(&local->tx_queue); | ||
399 | if (skb != NULL) { | ||
400 | pr_debug("Sending pending skb\n"); | ||
401 | print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET, | ||
402 | 16, 1, skb->data, skb->len, true); | ||
403 | |||
404 | nfc_data_exchange(local->dev, local->target_idx, | ||
405 | skb, nfc_llcp_recv, local); | ||
406 | } else { | ||
407 | nfc_llcp_send_symm(local->dev); | ||
408 | } | ||
409 | |||
410 | mod_timer(&local->link_timer, | ||
411 | jiffies + msecs_to_jiffies(local->remote_lto)); | ||
412 | } | 413 | } |
413 | 414 | ||
414 | static u8 nfc_llcp_dsap(struct sk_buff *pdu) | 415 | static u8 nfc_llcp_dsap(struct sk_buff *pdu) |
@@ -443,46 +444,146 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) | |||
443 | sock->recv_ack_n = (sock->recv_n - 1) % 16; | 444 | sock->recv_ack_n = (sock->recv_n - 1) % 16; |
444 | } | 445 | } |
445 | 446 | ||
447 | static void nfc_llcp_tx_work(struct work_struct *work) | ||
448 | { | ||
449 | struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, | ||
450 | tx_work); | ||
451 | struct sk_buff *skb; | ||
452 | struct sock *sk; | ||
453 | struct nfc_llcp_sock *llcp_sock; | ||
454 | |||
455 | skb = skb_dequeue(&local->tx_queue); | ||
456 | if (skb != NULL) { | ||
457 | sk = skb->sk; | ||
458 | llcp_sock = nfc_llcp_sock(sk); | ||
459 | if (llcp_sock != NULL) { | ||
460 | int ret; | ||
461 | |||
462 | pr_debug("Sending pending skb\n"); | ||
463 | print_hex_dump(KERN_DEBUG, "LLCP Tx: ", | ||
464 | DUMP_PREFIX_OFFSET, 16, 1, | ||
465 | skb->data, skb->len, true); | ||
466 | |||
467 | ret = nfc_data_exchange(local->dev, local->target_idx, | ||
468 | skb, nfc_llcp_recv, local); | ||
469 | |||
470 | if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) { | ||
471 | skb = skb_get(skb); | ||
472 | skb_queue_tail(&llcp_sock->tx_pending_queue, | ||
473 | skb); | ||
474 | } | ||
475 | } else { | ||
476 | nfc_llcp_send_symm(local->dev); | ||
477 | } | ||
478 | } else { | ||
479 | nfc_llcp_send_symm(local->dev); | ||
480 | } | ||
481 | |||
482 | mod_timer(&local->link_timer, | ||
483 | jiffies + msecs_to_jiffies(2 * local->remote_lto)); | ||
484 | } | ||
485 | |||
486 | static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local, | ||
487 | u8 ssap) | ||
488 | { | ||
489 | struct sock *sk; | ||
490 | struct nfc_llcp_sock *llcp_sock; | ||
491 | struct hlist_node *node; | ||
492 | |||
493 | read_lock(&local->connecting_sockets.lock); | ||
494 | |||
495 | sk_for_each(sk, node, &local->connecting_sockets.head) { | ||
496 | llcp_sock = nfc_llcp_sock(sk); | ||
497 | |||
498 | if (llcp_sock->ssap == ssap) { | ||
499 | sock_hold(&llcp_sock->sk); | ||
500 | goto out; | ||
501 | } | ||
502 | } | ||
503 | |||
504 | llcp_sock = NULL; | ||
505 | |||
506 | out: | ||
507 | read_unlock(&local->connecting_sockets.lock); | ||
508 | |||
509 | return llcp_sock; | ||
510 | } | ||
511 | |||
446 | static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, | 512 | static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, |
447 | u8 ssap, u8 dsap) | 513 | u8 ssap, u8 dsap) |
448 | { | 514 | { |
449 | struct nfc_llcp_sock *sock, *llcp_sock, *n; | 515 | struct sock *sk; |
516 | struct hlist_node *node; | ||
517 | struct nfc_llcp_sock *llcp_sock; | ||
450 | 518 | ||
451 | pr_debug("ssap dsap %d %d\n", ssap, dsap); | 519 | pr_debug("ssap dsap %d %d\n", ssap, dsap); |
452 | 520 | ||
453 | if (ssap == 0 && dsap == 0) | 521 | if (ssap == 0 && dsap == 0) |
454 | return NULL; | 522 | return NULL; |
455 | 523 | ||
456 | mutex_lock(&local->socket_lock); | 524 | read_lock(&local->sockets.lock); |
457 | sock = local->sockets[ssap]; | ||
458 | if (sock == NULL) { | ||
459 | mutex_unlock(&local->socket_lock); | ||
460 | return NULL; | ||
461 | } | ||
462 | 525 | ||
463 | pr_debug("root dsap %d (%d)\n", sock->dsap, dsap); | 526 | llcp_sock = NULL; |
464 | 527 | ||
465 | if (sock->dsap == dsap) { | 528 | sk_for_each(sk, node, &local->sockets.head) { |
466 | sock_hold(&sock->sk); | 529 | llcp_sock = nfc_llcp_sock(sk); |
467 | mutex_unlock(&local->socket_lock); | 530 | |
468 | return sock; | 531 | if (llcp_sock->ssap == ssap && |
532 | llcp_sock->dsap == dsap) | ||
533 | break; | ||
469 | } | 534 | } |
470 | 535 | ||
471 | list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { | 536 | read_unlock(&local->sockets.lock); |
472 | pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock, | 537 | |
473 | &llcp_sock->sk, llcp_sock->dsap); | 538 | if (llcp_sock == NULL) |
474 | if (llcp_sock->dsap == dsap) { | 539 | return NULL; |
475 | sock_hold(&llcp_sock->sk); | 540 | |
476 | mutex_unlock(&local->socket_lock); | 541 | sock_hold(&llcp_sock->sk); |
477 | return llcp_sock; | 542 | |
478 | } | 543 | return llcp_sock; |
544 | } | ||
545 | |||
546 | static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local, | ||
547 | u8 *sn, size_t sn_len) | ||
548 | { | ||
549 | struct sock *sk; | ||
550 | struct hlist_node *node; | ||
551 | struct nfc_llcp_sock *llcp_sock; | ||
552 | |||
553 | pr_debug("sn %zd\n", sn_len); | ||
554 | |||
555 | if (sn == NULL || sn_len == 0) | ||
556 | return NULL; | ||
557 | |||
558 | read_lock(&local->sockets.lock); | ||
559 | |||
560 | llcp_sock = NULL; | ||
561 | |||
562 | sk_for_each(sk, node, &local->sockets.head) { | ||
563 | llcp_sock = nfc_llcp_sock(sk); | ||
564 | |||
565 | if (llcp_sock->sk.sk_state != LLCP_LISTEN) | ||
566 | continue; | ||
567 | |||
568 | if (llcp_sock->service_name == NULL || | ||
569 | llcp_sock->service_name_len == 0) | ||
570 | continue; | ||
571 | |||
572 | if (llcp_sock->service_name_len != sn_len) | ||
573 | continue; | ||
574 | |||
575 | if (memcmp(sn, llcp_sock->service_name, sn_len) == 0) | ||
576 | break; | ||
479 | } | 577 | } |
480 | 578 | ||
481 | pr_err("Could not find socket for %d %d\n", ssap, dsap); | 579 | read_unlock(&local->sockets.lock); |
482 | 580 | ||
483 | mutex_unlock(&local->socket_lock); | 581 | if (llcp_sock == NULL) |
582 | return NULL; | ||
484 | 583 | ||
485 | return NULL; | 584 | sock_hold(&llcp_sock->sk); |
585 | |||
586 | return llcp_sock; | ||
486 | } | 587 | } |
487 | 588 | ||
488 | static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) | 589 | static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) |
@@ -518,35 +619,19 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, | |||
518 | { | 619 | { |
519 | struct sock *new_sk, *parent; | 620 | struct sock *new_sk, *parent; |
520 | struct nfc_llcp_sock *sock, *new_sock; | 621 | struct nfc_llcp_sock *sock, *new_sock; |
521 | u8 dsap, ssap, bound_sap, reason; | 622 | u8 dsap, ssap, reason; |
522 | 623 | ||
523 | dsap = nfc_llcp_dsap(skb); | 624 | dsap = nfc_llcp_dsap(skb); |
524 | ssap = nfc_llcp_ssap(skb); | 625 | ssap = nfc_llcp_ssap(skb); |
525 | 626 | ||
526 | pr_debug("%d %d\n", dsap, ssap); | 627 | pr_debug("%d %d\n", dsap, ssap); |
527 | 628 | ||
528 | nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], | ||
529 | skb->len - LLCP_HEADER_SIZE); | ||
530 | |||
531 | if (dsap != LLCP_SAP_SDP) { | 629 | if (dsap != LLCP_SAP_SDP) { |
532 | bound_sap = dsap; | 630 | sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); |
533 | 631 | if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) { | |
534 | mutex_lock(&local->socket_lock); | ||
535 | sock = local->sockets[dsap]; | ||
536 | if (sock == NULL) { | ||
537 | mutex_unlock(&local->socket_lock); | ||
538 | reason = LLCP_DM_NOBOUND; | 632 | reason = LLCP_DM_NOBOUND; |
539 | goto fail; | 633 | goto fail; |
540 | } | 634 | } |
541 | |||
542 | sock_hold(&sock->sk); | ||
543 | mutex_unlock(&local->socket_lock); | ||
544 | |||
545 | lock_sock(&sock->sk); | ||
546 | |||
547 | if (sock->dsap == LLCP_SAP_SDP && | ||
548 | sock->sk.sk_state == LLCP_LISTEN) | ||
549 | goto enqueue; | ||
550 | } else { | 635 | } else { |
551 | u8 *sn; | 636 | u8 *sn; |
552 | size_t sn_len; | 637 | size_t sn_len; |
@@ -559,40 +644,15 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, | |||
559 | 644 | ||
560 | pr_debug("Service name length %zu\n", sn_len); | 645 | pr_debug("Service name length %zu\n", sn_len); |
561 | 646 | ||
562 | mutex_lock(&local->socket_lock); | 647 | sock = nfc_llcp_sock_get_sn(local, sn, sn_len); |
563 | for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; | 648 | if (sock == NULL) { |
564 | bound_sap++) { | 649 | reason = LLCP_DM_NOBOUND; |
565 | sock = local->sockets[bound_sap]; | 650 | goto fail; |
566 | if (sock == NULL) | ||
567 | continue; | ||
568 | |||
569 | if (sock->service_name == NULL || | ||
570 | sock->service_name_len == 0) | ||
571 | continue; | ||
572 | |||
573 | if (sock->service_name_len != sn_len) | ||
574 | continue; | ||
575 | |||
576 | if (sock->dsap == LLCP_SAP_SDP && | ||
577 | sock->sk.sk_state == LLCP_LISTEN && | ||
578 | !memcmp(sn, sock->service_name, sn_len)) { | ||
579 | pr_debug("Found service name at SAP %d\n", | ||
580 | bound_sap); | ||
581 | sock_hold(&sock->sk); | ||
582 | mutex_unlock(&local->socket_lock); | ||
583 | |||
584 | lock_sock(&sock->sk); | ||
585 | |||
586 | goto enqueue; | ||
587 | } | ||
588 | } | 651 | } |
589 | mutex_unlock(&local->socket_lock); | ||
590 | } | 652 | } |
591 | 653 | ||
592 | reason = LLCP_DM_NOBOUND; | 654 | lock_sock(&sock->sk); |
593 | goto fail; | ||
594 | 655 | ||
595 | enqueue: | ||
596 | parent = &sock->sk; | 656 | parent = &sock->sk; |
597 | 657 | ||
598 | if (sk_acceptq_is_full(parent)) { | 658 | if (sk_acceptq_is_full(parent)) { |
@@ -612,15 +672,19 @@ enqueue: | |||
612 | 672 | ||
613 | new_sock = nfc_llcp_sock(new_sk); | 673 | new_sock = nfc_llcp_sock(new_sk); |
614 | new_sock->dev = local->dev; | 674 | new_sock->dev = local->dev; |
615 | new_sock->local = local; | 675 | new_sock->local = nfc_llcp_local_get(local); |
676 | new_sock->miu = local->remote_miu; | ||
616 | new_sock->nfc_protocol = sock->nfc_protocol; | 677 | new_sock->nfc_protocol = sock->nfc_protocol; |
617 | new_sock->ssap = bound_sap; | 678 | new_sock->ssap = sock->ssap; |
618 | new_sock->dsap = ssap; | 679 | new_sock->dsap = ssap; |
619 | new_sock->parent = parent; | 680 | new_sock->parent = parent; |
620 | 681 | ||
682 | nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE], | ||
683 | skb->len - LLCP_HEADER_SIZE); | ||
684 | |||
621 | pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); | 685 | pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); |
622 | 686 | ||
623 | list_add_tail(&new_sock->list, &sock->list); | 687 | nfc_llcp_sock_link(&local->sockets, new_sk); |
624 | 688 | ||
625 | nfc_llcp_accept_enqueue(&sock->sk, new_sk); | 689 | nfc_llcp_accept_enqueue(&sock->sk, new_sk); |
626 | 690 | ||
@@ -654,12 +718,12 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) | |||
654 | 718 | ||
655 | pr_debug("Remote ready %d tx queue len %d remote rw %d", | 719 | pr_debug("Remote ready %d tx queue len %d remote rw %d", |
656 | sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), | 720 | sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), |
657 | local->remote_rw); | 721 | sock->rw); |
658 | 722 | ||
659 | /* Try to queue some I frames for transmission */ | 723 | /* Try to queue some I frames for transmission */ |
660 | while (sock->remote_ready && | 724 | while (sock->remote_ready && |
661 | skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) { | 725 | skb_queue_len(&sock->tx_pending_queue) < sock->rw) { |
662 | struct sk_buff *pdu, *pending_pdu; | 726 | struct sk_buff *pdu; |
663 | 727 | ||
664 | pdu = skb_dequeue(&sock->tx_queue); | 728 | pdu = skb_dequeue(&sock->tx_queue); |
665 | if (pdu == NULL) | 729 | if (pdu == NULL) |
@@ -668,10 +732,7 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) | |||
668 | /* Update N(S)/N(R) */ | 732 | /* Update N(S)/N(R) */ |
669 | nfc_llcp_set_nrns(sock, pdu); | 733 | nfc_llcp_set_nrns(sock, pdu); |
670 | 734 | ||
671 | pending_pdu = skb_clone(pdu, GFP_KERNEL); | ||
672 | |||
673 | skb_queue_tail(&local->tx_queue, pdu); | 735 | skb_queue_tail(&local->tx_queue, pdu); |
674 | skb_queue_tail(&sock->tx_pending_queue, pending_pdu); | ||
675 | nr_frames++; | 736 | nr_frames++; |
676 | } | 737 | } |
677 | 738 | ||
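I frames are only moved to the device TX queue while the number of frames still awaiting acknowledgement stays below the receive window (RW) granted on this connection, and the per-PDU clone is gone because the transmitted skb itself is what sits on tx_pending_queue. The window check, reduced to plain counters:

#include <stdio.h>

/* Queue frames for transmission as long as fewer than rw of them
 * are still waiting to be acknowledged. */
static int queue_i_frames(int remote_ready, int pending, int backlog, int rw)
{
	int queued = 0;

	while (remote_ready && pending < rw && backlog > 0) {
		backlog--;
		pending++;	/* handed to the link, now awaiting an ack */
		queued++;
	}
	return queued;
}

int main(void)
{
	printf("%d\n", queue_i_frames(1, 0, 10, 4));	/* 4: window fills up */
	printf("%d\n", queue_i_frames(1, 3, 10, 4));	/* 1: one slot left */
	printf("%d\n", queue_i_frames(0, 0, 10, 4));	/* 0: peer not ready */
	return 0;
}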
@@ -728,11 +789,21 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
728 | 789 | ||
729 | llcp_sock->send_ack_n = nr; | 790 | llcp_sock->send_ack_n = nr; |
730 | 791 | ||
731 | skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) | 792 | /* Remove and free all skbs until ns == nr */ |
732 | if (nfc_llcp_ns(s) <= nr) { | 793 | skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) { |
733 | skb_unlink(s, &llcp_sock->tx_pending_queue); | 794 | skb_unlink(s, &llcp_sock->tx_pending_queue); |
734 | kfree_skb(s); | 795 | kfree_skb(s); |
735 | } | 796 | |
797 | if (nfc_llcp_ns(s) == nr) | ||
798 | break; | ||
799 | } | ||
800 | |||
801 | /* Re-queue the remaining skbs for transmission */ | ||
802 | skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue, | ||
803 | s, tmp) { | ||
804 | skb_unlink(s, &llcp_sock->tx_pending_queue); | ||
805 | skb_queue_head(&local->tx_queue, s); | ||
806 | } | ||
736 | } | 807 | } |
737 | 808 | ||
738 | if (ptype == LLCP_PDU_RR) | 809 | if (ptype == LLCP_PDU_RR) |
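On receiving a frame carrying N(R), everything on tx_pending_queue up to and including the acknowledged sequence number is freed, and whatever remains is pushed back to the head of the device queue for retransmission. A toy model of the acknowledgement walk, using an array of N(S) values in place of the skb queue:

#include <stdio.h>

int main(void)
{
	int pending[] = { 2, 3, 4, 5 };	/* N(S) of frames awaiting an ack */
	int npending = 4;
	int nr = 3;			/* peer acknowledged up to N(S) == 3 */
	int i, acked = 0;

	/* Drop frames until (and including) the one whose N(S) matches N(R). */
	for (i = 0; i < npending; i++) {
		acked++;
		if (pending[i] == nr)
			break;
	}

	printf("freed %d acknowledged frame(s)\n", acked);		/* 2 */
	printf("requeueing %d frame(s) for retransmission\n",
	       npending - acked);					/* 2 */
	return 0;
}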
@@ -740,7 +811,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
740 | else if (ptype == LLCP_PDU_RNR) | 811 | else if (ptype == LLCP_PDU_RNR) |
741 | llcp_sock->remote_ready = false; | 812 | llcp_sock->remote_ready = false; |
742 | 813 | ||
743 | if (nfc_llcp_queue_i_frames(llcp_sock) == 0) | 814 | if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I) |
744 | nfc_llcp_send_rr(llcp_sock); | 815 | nfc_llcp_send_rr(llcp_sock); |
745 | 816 | ||
746 | release_sock(sk); | 817 | release_sock(sk); |
@@ -791,11 +862,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb) | |||
791 | dsap = nfc_llcp_dsap(skb); | 862 | dsap = nfc_llcp_dsap(skb); |
792 | ssap = nfc_llcp_ssap(skb); | 863 | ssap = nfc_llcp_ssap(skb); |
793 | 864 | ||
794 | llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); | 865 | llcp_sock = nfc_llcp_connecting_sock_get(local, dsap); |
795 | |||
796 | if (llcp_sock == NULL) | ||
797 | llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); | ||
798 | |||
799 | if (llcp_sock == NULL) { | 866 | if (llcp_sock == NULL) { |
800 | pr_err("Invalid CC\n"); | 867 | pr_err("Invalid CC\n"); |
801 | nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); | 868 | nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); |
@@ -803,11 +870,15 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb) | |||
803 | return; | 870 | return; |
804 | } | 871 | } |
805 | 872 | ||
806 | llcp_sock->dsap = ssap; | ||
807 | sk = &llcp_sock->sk; | 873 | sk = &llcp_sock->sk; |
808 | 874 | ||
809 | nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], | 875 | /* Unlink from connecting and link to the client array */ |
810 | skb->len - LLCP_HEADER_SIZE); | 876 | nfc_llcp_sock_unlink(&local->connecting_sockets, sk); |
877 | nfc_llcp_sock_link(&local->sockets, sk); | ||
878 | llcp_sock->dsap = ssap; | ||
879 | |||
880 | nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE], | ||
881 | skb->len - LLCP_HEADER_SIZE); | ||
811 | 882 | ||
812 | sk->sk_state = LLCP_CONNECTED; | 883 | sk->sk_state = LLCP_CONNECTED; |
813 | sk->sk_state_change(sk); | 884 | sk->sk_state_change(sk); |
@@ -891,6 +962,21 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) | |||
891 | return; | 962 | return; |
892 | } | 963 | } |
893 | 964 | ||
965 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) | ||
966 | { | ||
967 | struct nfc_llcp_local *local; | ||
968 | |||
969 | local = nfc_llcp_find_local(dev); | ||
970 | if (local == NULL) | ||
971 | return -ENODEV; | ||
972 | |||
973 | local->rx_pending = skb_get(skb); | ||
974 | del_timer(&local->link_timer); | ||
975 | queue_work(local->rx_wq, &local->rx_work); | ||
976 | |||
977 | return 0; | ||
978 | } | ||
979 | |||
894 | void nfc_llcp_mac_is_down(struct nfc_dev *dev) | 980 | void nfc_llcp_mac_is_down(struct nfc_dev *dev) |
895 | { | 981 | { |
896 | struct nfc_llcp_local *local; | 982 | struct nfc_llcp_local *local; |
@@ -943,8 +1029,8 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) | |||
943 | 1029 | ||
944 | local->dev = ndev; | 1030 | local->dev = ndev; |
945 | INIT_LIST_HEAD(&local->list); | 1031 | INIT_LIST_HEAD(&local->list); |
1032 | kref_init(&local->ref); | ||
946 | mutex_init(&local->sdp_lock); | 1033 | mutex_init(&local->sdp_lock); |
947 | mutex_init(&local->socket_lock); | ||
948 | init_timer(&local->link_timer); | 1034 | init_timer(&local->link_timer); |
949 | local->link_timer.data = (unsigned long) local; | 1035 | local->link_timer.data = (unsigned long) local; |
950 | local->link_timer.function = nfc_llcp_symm_timer; | 1036 | local->link_timer.function = nfc_llcp_symm_timer; |
@@ -984,11 +1070,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) | |||
984 | goto err_rx_wq; | 1070 | goto err_rx_wq; |
985 | } | 1071 | } |
986 | 1072 | ||
1073 | local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock); | ||
1074 | local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock); | ||
1075 | |||
987 | nfc_llcp_build_gb(local); | 1076 | nfc_llcp_build_gb(local); |
988 | 1077 | ||
989 | local->remote_miu = LLCP_DEFAULT_MIU; | 1078 | local->remote_miu = LLCP_DEFAULT_MIU; |
990 | local->remote_lto = LLCP_DEFAULT_LTO; | 1079 | local->remote_lto = LLCP_DEFAULT_LTO; |
991 | local->remote_rw = LLCP_DEFAULT_RW; | ||
992 | 1080 | ||
993 | list_add(&llcp_devices, &local->list); | 1081 | list_add(&llcp_devices, &local->list); |
994 | 1082 | ||
@@ -1015,14 +1103,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev) | |||
1015 | return; | 1103 | return; |
1016 | } | 1104 | } |
1017 | 1105 | ||
1018 | list_del(&local->list); | 1106 | nfc_llcp_local_put(local); |
1019 | nfc_llcp_socket_release(local); | ||
1020 | del_timer_sync(&local->link_timer); | ||
1021 | skb_queue_purge(&local->tx_queue); | ||
1022 | destroy_workqueue(local->tx_wq); | ||
1023 | destroy_workqueue(local->rx_wq); | ||
1024 | kfree_skb(local->rx_pending); | ||
1025 | kfree(local); | ||
1026 | } | 1107 | } |
1027 | 1108 | ||
1028 | int __init nfc_llcp_init(void) | 1109 | int __init nfc_llcp_init(void) |
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h index 50680ce5ae43..7286c86982ff 100644 --- a/net/nfc/llcp/llcp.h +++ b/net/nfc/llcp/llcp.h | |||
@@ -40,12 +40,18 @@ enum llcp_state { | |||
40 | 40 | ||
41 | struct nfc_llcp_sock; | 41 | struct nfc_llcp_sock; |
42 | 42 | ||
43 | struct llcp_sock_list { | ||
44 | struct hlist_head head; | ||
45 | rwlock_t lock; | ||
46 | }; | ||
47 | |||
43 | struct nfc_llcp_local { | 48 | struct nfc_llcp_local { |
44 | struct list_head list; | 49 | struct list_head list; |
45 | struct nfc_dev *dev; | 50 | struct nfc_dev *dev; |
46 | 51 | ||
52 | struct kref ref; | ||
53 | |||
47 | struct mutex sdp_lock; | 54 | struct mutex sdp_lock; |
48 | struct mutex socket_lock; | ||
49 | 55 | ||
50 | struct timer_list link_timer; | 56 | struct timer_list link_timer; |
51 | struct sk_buff_head tx_queue; | 57 | struct sk_buff_head tx_queue; |
@@ -77,24 +83,26 @@ struct nfc_llcp_local { | |||
77 | u16 remote_lto; | 83 | u16 remote_lto; |
78 | u8 remote_opt; | 84 | u8 remote_opt; |
79 | u16 remote_wks; | 85 | u16 remote_wks; |
80 | u8 remote_rw; | ||
81 | 86 | ||
82 | /* sockets array */ | 87 | /* sockets array */ |
83 | struct nfc_llcp_sock *sockets[LLCP_MAX_SAP]; | 88 | struct llcp_sock_list sockets; |
89 | struct llcp_sock_list connecting_sockets; | ||
84 | }; | 90 | }; |
85 | 91 | ||
86 | struct nfc_llcp_sock { | 92 | struct nfc_llcp_sock { |
87 | struct sock sk; | 93 | struct sock sk; |
88 | struct list_head list; | ||
89 | struct nfc_dev *dev; | 94 | struct nfc_dev *dev; |
90 | struct nfc_llcp_local *local; | 95 | struct nfc_llcp_local *local; |
91 | u32 target_idx; | 96 | u32 target_idx; |
92 | u32 nfc_protocol; | 97 | u32 nfc_protocol; |
93 | 98 | ||
99 | /* Link parameters */ | ||
94 | u8 ssap; | 100 | u8 ssap; |
95 | u8 dsap; | 101 | u8 dsap; |
96 | char *service_name; | 102 | char *service_name; |
97 | size_t service_name_len; | 103 | size_t service_name_len; |
104 | u8 rw; | ||
105 | u16 miu; | ||
98 | 106 | ||
99 | /* Link variables */ | 107 | /* Link variables */ |
100 | u8 send_n; | 108 | u8 send_n; |
@@ -164,7 +172,11 @@ struct nfc_llcp_sock { | |||
164 | #define LLCP_DM_REJ 0x03 | 172 | #define LLCP_DM_REJ 0x03 |
165 | 173 | ||
166 | 174 | ||
175 | void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s); | ||
176 | void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s); | ||
167 | struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); | 177 | struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); |
178 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local); | ||
179 | int nfc_llcp_local_put(struct nfc_llcp_local *local); | ||
168 | u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, | 180 | u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, |
169 | struct nfc_llcp_sock *sock); | 181 | struct nfc_llcp_sock *sock); |
170 | u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); | 182 | u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); |
@@ -179,8 +191,10 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk); | |||
179 | struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock); | 191 | struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock); |
180 | 192 | ||
181 | /* TLV API */ | 193 | /* TLV API */ |
182 | int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, | 194 | int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local, |
183 | u8 *tlv_array, u16 tlv_array_len); | 195 | u8 *tlv_array, u16 tlv_array_len); |
196 | int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock, | ||
197 | u8 *tlv_array, u16 tlv_array_len); | ||
184 | 198 | ||
185 | /* Commands API */ | 199 | /* Commands API */ |
186 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); | 200 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); |
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 3f339b19d140..2c0b317344b7 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -111,7 +111,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | llcp_sock->dev = dev; | 113 | llcp_sock->dev = dev; |
114 | llcp_sock->local = local; | 114 | llcp_sock->local = nfc_llcp_local_get(local); |
115 | llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; | 115 | llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; |
116 | llcp_sock->service_name_len = min_t(unsigned int, | 116 | llcp_sock->service_name_len = min_t(unsigned int, |
117 | llcp_addr.service_name_len, | 117 | llcp_addr.service_name_len, |
@@ -124,7 +124,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) | |||
124 | if (llcp_sock->ssap == LLCP_MAX_SAP) | 124 | if (llcp_sock->ssap == LLCP_MAX_SAP) |
125 | goto put_dev; | 125 | goto put_dev; |
126 | 126 | ||
127 | local->sockets[llcp_sock->ssap] = llcp_sock; | 127 | nfc_llcp_sock_link(&local->sockets, sk); |
128 | 128 | ||
129 | pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); | 129 | pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); |
130 | 130 | ||
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, | |||
292 | 292 | ||
293 | pr_debug("%p\n", sk); | 293 | pr_debug("%p\n", sk); |
294 | 294 | ||
295 | if (llcp_sock == NULL) | ||
296 | return -EBADFD; | ||
297 | |||
295 | addr->sa_family = AF_NFC; | 298 | addr->sa_family = AF_NFC; |
296 | *len = sizeof(struct sockaddr_nfc_llcp); | 299 | *len = sizeof(struct sockaddr_nfc_llcp); |
297 | 300 | ||
@@ -379,15 +382,6 @@ static int llcp_sock_release(struct socket *sock) | |||
379 | goto out; | 382 | goto out; |
380 | } | 383 | } |
381 | 384 | ||
382 | mutex_lock(&local->socket_lock); | ||
383 | |||
384 | if (llcp_sock == local->sockets[llcp_sock->ssap]) | ||
385 | local->sockets[llcp_sock->ssap] = NULL; | ||
386 | else | ||
387 | list_del_init(&llcp_sock->list); | ||
388 | |||
389 | mutex_unlock(&local->socket_lock); | ||
390 | |||
391 | lock_sock(sk); | 385 | lock_sock(sk); |
392 | 386 | ||
393 | /* Send a DISC */ | 387 | /* Send a DISC */ |
@@ -412,14 +406,12 @@ static int llcp_sock_release(struct socket *sock) | |||
412 | } | 406 | } |
413 | } | 407 | } |
414 | 408 | ||
415 | /* Freeing the SAP */ | 409 | nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); |
416 | if ((sk->sk_state == LLCP_CONNECTED | ||
417 | && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) || | ||
418 | sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN) | ||
419 | nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); | ||
420 | 410 | ||
421 | release_sock(sk); | 411 | release_sock(sk); |
422 | 412 | ||
413 | nfc_llcp_sock_unlink(&local->sockets, sk); | ||
414 | |||
423 | out: | 415 | out: |
424 | sock_orphan(sk); | 416 | sock_orphan(sk); |
425 | sock_put(sk); | 417 | sock_put(sk); |
@@ -487,7 +479,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
487 | } | 479 | } |
488 | 480 | ||
489 | llcp_sock->dev = dev; | 481 | llcp_sock->dev = dev; |
490 | llcp_sock->local = local; | 482 | llcp_sock->local = nfc_llcp_local_get(local); |
483 | llcp_sock->miu = llcp_sock->local->remote_miu; | ||
491 | llcp_sock->ssap = nfc_llcp_get_local_ssap(local); | 484 | llcp_sock->ssap = nfc_llcp_get_local_ssap(local); |
492 | if (llcp_sock->ssap == LLCP_SAP_MAX) { | 485 | if (llcp_sock->ssap == LLCP_SAP_MAX) { |
493 | ret = -ENOMEM; | 486 | ret = -ENOMEM; |
@@ -505,21 +498,26 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
505 | llcp_sock->service_name_len, | 498 | llcp_sock->service_name_len, |
506 | GFP_KERNEL); | 499 | GFP_KERNEL); |
507 | 500 | ||
508 | local->sockets[llcp_sock->ssap] = llcp_sock; | 501 | nfc_llcp_sock_link(&local->connecting_sockets, sk); |
509 | 502 | ||
510 | ret = nfc_llcp_send_connect(llcp_sock); | 503 | ret = nfc_llcp_send_connect(llcp_sock); |
511 | if (ret) | 504 | if (ret) |
512 | goto put_dev; | 505 | goto sock_unlink; |
513 | 506 | ||
514 | ret = sock_wait_state(sk, LLCP_CONNECTED, | 507 | ret = sock_wait_state(sk, LLCP_CONNECTED, |
515 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | 508 | sock_sndtimeo(sk, flags & O_NONBLOCK)); |
516 | if (ret) | 509 | if (ret) |
517 | goto put_dev; | 510 | goto sock_unlink; |
518 | 511 | ||
519 | release_sock(sk); | 512 | release_sock(sk); |
520 | 513 | ||
521 | return 0; | 514 | return 0; |
522 | 515 | ||
516 | sock_unlink: | ||
517 | nfc_llcp_put_ssap(local, llcp_sock->ssap); | ||
518 | |||
519 | nfc_llcp_sock_unlink(&local->connecting_sockets, sk); | ||
520 | |||
523 | put_dev: | 521 | put_dev: |
524 | nfc_put_device(dev); | 522 | nfc_put_device(dev); |
525 | 523 | ||
@@ -684,13 +682,14 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp) | |||
684 | 682 | ||
685 | llcp_sock->ssap = 0; | 683 | llcp_sock->ssap = 0; |
686 | llcp_sock->dsap = LLCP_SAP_SDP; | 684 | llcp_sock->dsap = LLCP_SAP_SDP; |
685 | llcp_sock->rw = LLCP_DEFAULT_RW; | ||
686 | llcp_sock->miu = LLCP_DEFAULT_MIU; | ||
687 | llcp_sock->send_n = llcp_sock->send_ack_n = 0; | 687 | llcp_sock->send_n = llcp_sock->send_ack_n = 0; |
688 | llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; | 688 | llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; |
689 | llcp_sock->remote_ready = 1; | 689 | llcp_sock->remote_ready = 1; |
690 | skb_queue_head_init(&llcp_sock->tx_queue); | 690 | skb_queue_head_init(&llcp_sock->tx_queue); |
691 | skb_queue_head_init(&llcp_sock->tx_pending_queue); | 691 | skb_queue_head_init(&llcp_sock->tx_pending_queue); |
692 | skb_queue_head_init(&llcp_sock->tx_backlog_queue); | 692 | skb_queue_head_init(&llcp_sock->tx_backlog_queue); |
693 | INIT_LIST_HEAD(&llcp_sock->list); | ||
694 | INIT_LIST_HEAD(&llcp_sock->accept_queue); | 693 | INIT_LIST_HEAD(&llcp_sock->accept_queue); |
695 | 694 | ||
696 | if (sock != NULL) | 695 | if (sock != NULL) |
@@ -701,8 +700,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp) | |||
701 | 700 | ||
702 | void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) | 701 | void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) |
703 | { | 702 | { |
704 | struct nfc_llcp_local *local = sock->local; | ||
705 | |||
706 | kfree(sock->service_name); | 703 | kfree(sock->service_name); |
707 | 704 | ||
708 | skb_queue_purge(&sock->tx_queue); | 705 | skb_queue_purge(&sock->tx_queue); |
@@ -711,12 +708,9 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) | |||
711 | 708 | ||
712 | list_del_init(&sock->accept_queue); | 709 | list_del_init(&sock->accept_queue); |
713 | 710 | ||
714 | if (local != NULL && sock == local->sockets[sock->ssap]) | ||
715 | local->sockets[sock->ssap] = NULL; | ||
716 | else | ||
717 | list_del_init(&sock->list); | ||
718 | |||
719 | sock->parent = NULL; | 711 | sock->parent = NULL; |
712 | |||
713 | nfc_llcp_local_put(sock->local); | ||
720 | } | 714 | } |
721 | 715 | ||
722 | static int llcp_sock_create(struct net *net, struct socket *sock, | 716 | static int llcp_sock_create(struct net *net, struct socket *sock, |
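The sock.c changes above drop the fixed local->sockets[ssap] array and its socket_lock in favour of reference counting on the nfc_llcp_local (nfc_llcp_local_get()/nfc_llcp_local_put()) plus list-based bookkeeping through nfc_llcp_sock_link()/nfc_llcp_sock_unlink(): bound sockets go on local->sockets, connecting ones on local->connecting_sockets, and the connect() error path now both frees the SSAP and unlinks the socket. The link/unlink helpers themselves live in net/nfc/llcp/llcp.c and are not shown in this hunk; the sketch below shows the usual shape of such helpers, assuming a list type with an hlist_head and an rwlock (the type and field names are assumptions, and the in-tree definitions may differ):

	/* Sketch only, not the in-tree definitions. */
	void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
	{
		write_lock(&l->lock);
		sk_add_node(sk, &l->head);
		write_unlock(&l->lock);
	}

	void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
	{
		write_lock(&l->lock);
		sk_del_node_init(sk);
		write_unlock(&l->lock);
	}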
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index d560e6f13072..766a02b1dfa1 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c | |||
@@ -387,7 +387,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev) | |||
387 | return nci_close_device(ndev); | 387 | return nci_close_device(ndev); |
388 | } | 388 | } |
389 | 389 | ||
390 | static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) | 390 | static int nci_start_poll(struct nfc_dev *nfc_dev, |
391 | __u32 im_protocols, __u32 tm_protocols) | ||
391 | { | 392 | { |
392 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); | 393 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); |
393 | int rc; | 394 | int rc; |
@@ -413,11 +414,11 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) | |||
413 | return -EBUSY; | 414 | return -EBUSY; |
414 | } | 415 | } |
415 | 416 | ||
416 | rc = nci_request(ndev, nci_rf_discover_req, protocols, | 417 | rc = nci_request(ndev, nci_rf_discover_req, im_protocols, |
417 | msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); | 418 | msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); |
418 | 419 | ||
419 | if (!rc) | 420 | if (!rc) |
420 | ndev->poll_prots = protocols; | 421 | ndev->poll_prots = im_protocols; |
421 | 422 | ||
422 | return rc; | 423 | return rc; |
423 | } | 424 | } |
@@ -521,9 +522,9 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, | |||
521 | } | 522 | } |
522 | } | 523 | } |
523 | 524 | ||
524 | static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, | 525 | static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, |
525 | struct sk_buff *skb, | 526 | struct sk_buff *skb, |
526 | data_exchange_cb_t cb, void *cb_context) | 527 | data_exchange_cb_t cb, void *cb_context) |
527 | { | 528 | { |
528 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); | 529 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); |
529 | int rc; | 530 | int rc; |
@@ -556,7 +557,7 @@ static struct nfc_ops nci_nfc_ops = { | |||
556 | .stop_poll = nci_stop_poll, | 557 | .stop_poll = nci_stop_poll, |
557 | .activate_target = nci_activate_target, | 558 | .activate_target = nci_activate_target, |
558 | .deactivate_target = nci_deactivate_target, | 559 | .deactivate_target = nci_deactivate_target, |
559 | .data_exchange = nci_data_exchange, | 560 | .im_transceive = nci_transceive, |
560 | }; | 561 | }; |
561 | 562 | ||
562 | /* ---- Interface to NCI drivers ---- */ | 563 | /* ---- Interface to NCI drivers ---- */ |
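Two things happen in nci/core.c above: the ->start_poll() hook gains separate initiator-mode and target-mode protocol masks (NCI feeds only im_protocols into RF discovery here), and the nfc_ops member is renamed from .data_exchange to .im_transceive to make clear it is the initiator-mode data path. A sketch of a driver callback after the signature change; the foo_* name is made up for illustration:

	static int foo_start_poll(struct nfc_dev *nfc_dev,
				  __u32 im_protocols, __u32 tm_protocols)
	{
		/* A controller without target-mode support would typically
		 * reject a pure target-mode request up front. */
		if (!im_protocols && tm_protocols)
			return -EOPNOTSUPP;

		/* Start RF discovery for im_protocols here (controller
		 * specific); this sketch just reports success. */
		return 0;
	}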
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 581d419083aa..03c31db38f12 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c | |||
@@ -49,6 +49,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { | |||
49 | [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, | 49 | [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, |
50 | [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, | 50 | [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, |
51 | [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, | 51 | [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, |
52 | [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 }, | ||
53 | [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 }, | ||
52 | }; | 54 | }; |
53 | 55 | ||
54 | static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, | 56 | static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, |
@@ -219,6 +221,68 @@ free_msg: | |||
219 | return -EMSGSIZE; | 221 | return -EMSGSIZE; |
220 | } | 222 | } |
221 | 223 | ||
224 | int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol) | ||
225 | { | ||
226 | struct sk_buff *msg; | ||
227 | void *hdr; | ||
228 | |||
229 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
230 | if (!msg) | ||
231 | return -ENOMEM; | ||
232 | |||
233 | hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, | ||
234 | NFC_EVENT_TM_ACTIVATED); | ||
235 | if (!hdr) | ||
236 | goto free_msg; | ||
237 | |||
238 | if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) | ||
239 | goto nla_put_failure; | ||
240 | if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol)) | ||
241 | goto nla_put_failure; | ||
242 | |||
243 | genlmsg_end(msg, hdr); | ||
244 | |||
245 | genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); | ||
246 | |||
247 | return 0; | ||
248 | |||
249 | nla_put_failure: | ||
250 | genlmsg_cancel(msg, hdr); | ||
251 | free_msg: | ||
252 | nlmsg_free(msg); | ||
253 | return -EMSGSIZE; | ||
254 | } | ||
255 | |||
256 | int nfc_genl_tm_deactivated(struct nfc_dev *dev) | ||
257 | { | ||
258 | struct sk_buff *msg; | ||
259 | void *hdr; | ||
260 | |||
261 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
262 | if (!msg) | ||
263 | return -ENOMEM; | ||
264 | |||
265 | hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, | ||
266 | NFC_EVENT_TM_DEACTIVATED); | ||
267 | if (!hdr) | ||
268 | goto free_msg; | ||
269 | |||
270 | if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) | ||
271 | goto nla_put_failure; | ||
272 | |||
273 | genlmsg_end(msg, hdr); | ||
274 | |||
275 | genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); | ||
276 | |||
277 | return 0; | ||
278 | |||
279 | nla_put_failure: | ||
280 | genlmsg_cancel(msg, hdr); | ||
281 | free_msg: | ||
282 | nlmsg_free(msg); | ||
283 | return -EMSGSIZE; | ||
284 | } | ||
285 | |||
222 | int nfc_genl_device_added(struct nfc_dev *dev) | 286 | int nfc_genl_device_added(struct nfc_dev *dev) |
223 | { | 287 | { |
224 | struct sk_buff *msg; | 288 | struct sk_buff *msg; |
@@ -519,16 +583,25 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) | |||
519 | struct nfc_dev *dev; | 583 | struct nfc_dev *dev; |
520 | int rc; | 584 | int rc; |
521 | u32 idx; | 585 | u32 idx; |
522 | u32 protocols; | 586 | u32 im_protocols = 0, tm_protocols = 0; |
523 | 587 | ||
524 | pr_debug("Poll start\n"); | 588 | pr_debug("Poll start\n"); |
525 | 589 | ||
526 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || | 590 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || |
527 | !info->attrs[NFC_ATTR_PROTOCOLS]) | 591 | ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && |
592 | !info->attrs[NFC_ATTR_PROTOCOLS]) && | ||
593 | !info->attrs[NFC_ATTR_TM_PROTOCOLS])) | ||
528 | return -EINVAL; | 594 | return -EINVAL; |
529 | 595 | ||
530 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | 596 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); |
531 | protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); | 597 | |
598 | if (info->attrs[NFC_ATTR_TM_PROTOCOLS]) | ||
599 | tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]); | ||
600 | |||
601 | if (info->attrs[NFC_ATTR_IM_PROTOCOLS]) | ||
602 | im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]); | ||
603 | else if (info->attrs[NFC_ATTR_PROTOCOLS]) | ||
604 | im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); | ||
532 | 605 | ||
533 | dev = nfc_get_device(idx); | 606 | dev = nfc_get_device(idx); |
534 | if (!dev) | 607 | if (!dev) |
@@ -536,7 +609,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) | |||
536 | 609 | ||
537 | mutex_lock(&dev->genl_data.genl_data_mutex); | 610 | mutex_lock(&dev->genl_data.genl_data_mutex); |
538 | 611 | ||
539 | rc = nfc_start_poll(dev, protocols); | 612 | rc = nfc_start_poll(dev, im_protocols, tm_protocols); |
540 | if (!rc) | 613 | if (!rc) |
541 | dev->genl_data.poll_req_pid = info->snd_pid; | 614 | dev->genl_data.poll_req_pid = info->snd_pid; |
542 | 615 | ||
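On the netlink side, NFC_CMD_START_POLL now accepts NFC_ATTR_IM_PROTOCOLS and NFC_ATTR_TM_PROTOCOLS (the legacy NFC_ATTR_PROTOCOLS is still honoured as the initiator mask), and two new multicast events, NFC_EVENT_TM_ACTIVATED and NFC_EVENT_TM_DEACTIVATED, report target-mode activation. A userspace sketch of issuing the new-style poll request, assuming the libnl-3 genl helpers and the command/attribute names from <linux/nfc.h>; this is not part of the patch and error handling is kept minimal:

	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/nfc.h>

	static int nfc_cli_start_poll(uint32_t dev_idx, uint32_t im, uint32_t tm)
	{
		struct nl_sock *sk;
		struct nl_msg *msg;
		int family, err = -1;

		sk = nl_socket_alloc();
		if (!sk)
			return -1;
		if (genl_connect(sk))
			goto out_sock;

		family = genl_ctrl_resolve(sk, NFC_GENL_NAME);
		if (family < 0)
			goto out_sock;

		msg = nlmsg_alloc();
		if (!msg)
			goto out_sock;

		if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
				 NFC_CMD_START_POLL, NFC_GENL_VERSION))
			goto out_msg;

		nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);

		/* The hunk above rejects a request carrying neither an
		 * initiator nor a target mask, so send only non-zero ones. */
		if (im)
			nla_put_u32(msg, NFC_ATTR_IM_PROTOCOLS, im);
		if (tm)
			nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, tm);

		err = nl_send_auto(sk, msg);
	out_msg:
		nlmsg_free(msg);
	out_sock:
		nl_socket_free(sk);
		return err < 0 ? -1 : 0;
	}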
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index 3dd4232ae664..c5e42b79a418 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h | |||
@@ -55,6 +55,7 @@ int nfc_llcp_register_device(struct nfc_dev *dev); | |||
55 | void nfc_llcp_unregister_device(struct nfc_dev *dev); | 55 | void nfc_llcp_unregister_device(struct nfc_dev *dev); |
56 | int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); | 56 | int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); |
57 | u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); | 57 | u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); |
58 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); | ||
58 | int __init nfc_llcp_init(void); | 59 | int __init nfc_llcp_init(void); |
59 | void nfc_llcp_exit(void); | 60 | void nfc_llcp_exit(void); |
60 | 61 | ||
@@ -90,6 +91,12 @@ static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len) | |||
90 | return NULL; | 91 | return NULL; |
91 | } | 92 | } |
92 | 93 | ||
94 | static inline int nfc_llcp_data_received(struct nfc_dev *dev, | ||
95 | struct sk_buff *skb) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | |||
93 | static inline int nfc_llcp_init(void) | 100 | static inline int nfc_llcp_init(void) |
94 | { | 101 | { |
95 | return 0; | 102 | return 0; |
@@ -128,6 +135,9 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, | |||
128 | u8 comm_mode, u8 rf_mode); | 135 | u8 comm_mode, u8 rf_mode); |
129 | int nfc_genl_dep_link_down_event(struct nfc_dev *dev); | 136 | int nfc_genl_dep_link_down_event(struct nfc_dev *dev); |
130 | 137 | ||
138 | int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol); | ||
139 | int nfc_genl_tm_deactivated(struct nfc_dev *dev); | ||
140 | |||
131 | struct nfc_dev *nfc_get_device(unsigned int idx); | 141 | struct nfc_dev *nfc_get_device(unsigned int idx); |
132 | 142 | ||
133 | static inline void nfc_put_device(struct nfc_dev *dev) | 143 | static inline void nfc_put_device(struct nfc_dev *dev) |
@@ -158,7 +168,7 @@ int nfc_dev_up(struct nfc_dev *dev); | |||
158 | 168 | ||
159 | int nfc_dev_down(struct nfc_dev *dev); | 169 | int nfc_dev_down(struct nfc_dev *dev); |
160 | 170 | ||
161 | int nfc_start_poll(struct nfc_dev *dev, u32 protocols); | 171 | int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols); |
162 | 172 | ||
163 | int nfc_stop_poll(struct nfc_dev *dev); | 173 | int nfc_stop_poll(struct nfc_dev *dev); |
164 | 174 | ||
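nfc.h above also grows an nfc_llcp_data_received() hook with a no-op inline stub for the !CONFIG_NFC_LLCP case, mirroring the existing pattern for the other LLCP entry points, so the core can hand inbound target-mode data to LLCP without conditional compilation at the call sites. A hypothetical call-site sketch (the foo_* name is made up and skb ownership/cleanup is deliberately omitted):

	static void foo_tm_data_ind(struct nfc_dev *dev, struct sk_buff *skb)
	{
		int err;

		/* With LLCP compiled out the stub above returns 0; with it
		 * compiled in the frame is handed to the LLCP layer. */
		err = nfc_llcp_data_received(dev, skb);
		if (err)
			pr_debug("llcp rx failed: %d\n", err);
	}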
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 15f347477a99..baf5704740ee 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void) | |||
1389 | spin_unlock(®_requests_lock); | 1389 | spin_unlock(®_requests_lock); |
1390 | 1390 | ||
1391 | if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) | 1391 | if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) |
1392 | cancel_delayed_work_sync(®_timeout); | 1392 | cancel_delayed_work(®_timeout); |
1393 | 1393 | ||
1394 | if (need_more_processing) | 1394 | if (need_more_processing) |
1395 | schedule_work(®_work); | 1395 | schedule_work(®_work); |
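The regulatory one-liner swaps cancel_delayed_work_sync() for cancel_delayed_work(). The _sync variant waits for a running instance of the handler to finish, so it must never be reachable from the handler itself or while holding a lock the handler needs; the plain variant only removes a pending, not-yet-running item. The diff does not spell out the exact trigger, but the generic hazard looks like this (demo_* names are made up):

	#include <linux/workqueue.h>

	static void demo_timeout(struct work_struct *work);
	static DECLARE_DELAYED_WORK(demo_work, demo_timeout);

	static void demo_finish(void)
	{
		/* Does not wait: only dequeues a pending instance. */
		cancel_delayed_work(&demo_work);

		/* cancel_delayed_work_sync(&demo_work) here would deadlock
		 * whenever demo_finish() runs from demo_timeout() itself. */
	}

	static void demo_timeout(struct work_struct *work)
	{
		demo_finish();
	}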
diff --git a/net/wireless/util.c b/net/wireless/util.c index 8f2d68fc3a44..316cfd00914f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, | |||
804 | ntype == NL80211_IFTYPE_P2P_CLIENT)) | 804 | ntype == NL80211_IFTYPE_P2P_CLIENT)) |
805 | return -EBUSY; | 805 | return -EBUSY; |
806 | 806 | ||
807 | if (ntype != otype) { | 807 | if (ntype != otype && netif_running(dev)) { |
808 | err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, | 808 | err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, |
809 | ntype); | 809 | ntype); |
810 | if (err) | 810 | if (err) |