Diffstat (limited to 'net/bluetooth')
-rw-r--r-- | net/bluetooth/Kconfig | 37
-rw-r--r-- | net/bluetooth/Makefile | 5
-rw-r--r-- | net/bluetooth/af_bluetooth.c | 11
-rw-r--r-- | net/bluetooth/bnep/Kconfig | 2
-rw-r--r-- | net/bluetooth/bnep/core.c | 17
-rw-r--r-- | net/bluetooth/cmtp/Kconfig | 2
-rw-r--r-- | net/bluetooth/cmtp/core.c | 13
-rw-r--r-- | net/bluetooth/hci_conn.c | 186
-rw-r--r-- | net/bluetooth/hci_core.c | 707
-rw-r--r-- | net/bluetooth/hci_event.c | 474
-rw-r--r-- | net/bluetooth/hci_sock.c | 23
-rw-r--r-- | net/bluetooth/hci_sysfs.c | 131
-rw-r--r-- | net/bluetooth/hidp/Kconfig | 2
-rw-r--r-- | net/bluetooth/hidp/core.c | 157
-rw-r--r-- | net/bluetooth/l2cap_core.c | 1458
-rw-r--r-- | net/bluetooth/l2cap_sock.c | 168
-rw-r--r-- | net/bluetooth/mgmt.c | 1730
-rw-r--r-- | net/bluetooth/rfcomm/Kconfig | 2
-rw-r--r-- | net/bluetooth/rfcomm/core.c | 36
-rw-r--r-- | net/bluetooth/rfcomm/sock.c | 2
-rw-r--r-- | net/bluetooth/rfcomm/tty.c | 45
-rw-r--r-- | net/bluetooth/sco.c | 44
-rw-r--r-- | net/bluetooth/smp.c | 268
23 files changed, 3469 insertions, 2051 deletions
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index bfb3dc03c9de..9ec85eb8853d 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,11 @@ menuconfig BT | |||
6 | tristate "Bluetooth subsystem support" | 6 | tristate "Bluetooth subsystem support" |
7 | depends on NET && !S390 | 7 | depends on NET && !S390 |
8 | depends on RFKILL || !RFKILL | 8 | depends on RFKILL || !RFKILL |
9 | select CRC16 | ||
9 | select CRYPTO | 10 | select CRYPTO |
11 | select CRYPTO_BLKCIPHER | ||
12 | select CRYPTO_AES | ||
13 | select CRYPTO_ECB | ||
10 | help | 14 | help |
11 | Bluetooth is low-cost, low-power, short-range wireless technology. | 15 | Bluetooth is low-cost, low-power, short-range wireless technology. |
12 | It was designed as a replacement for cables and other short-range | 16 | It was designed as a replacement for cables and other short-range |
@@ -15,10 +19,12 @@ menuconfig BT | |||
15 | Bluetooth can be found at <http://www.bluetooth.com/>. | 19 | Bluetooth can be found at <http://www.bluetooth.com/>. |
16 | 20 | ||
17 | Linux Bluetooth subsystem consist of several layers: | 21 | Linux Bluetooth subsystem consist of several layers: |
18 | Bluetooth Core (HCI device and connection manager, scheduler) | 22 | Bluetooth Core |
23 | HCI device and connection manager, scheduler | ||
24 | SCO audio links | ||
25 | L2CAP (Logical Link Control and Adaptation Protocol) | ||
26 | SMP (Security Manager Protocol) on LE (Low Energy) links | ||
19 | HCI Device drivers (Interface to the hardware) | 27 | HCI Device drivers (Interface to the hardware) |
20 | SCO Module (SCO audio links) | ||
21 | L2CAP Module (Logical Link Control and Adaptation Protocol) | ||
22 | RFCOMM Module (RFCOMM Protocol) | 28 | RFCOMM Module (RFCOMM Protocol) |
23 | BNEP Module (Bluetooth Network Encapsulation Protocol) | 29 | BNEP Module (Bluetooth Network Encapsulation Protocol) |
24 | CMTP Module (CAPI Message Transport Protocol) | 30 | CMTP Module (CAPI Message Transport Protocol) |
@@ -33,31 +39,6 @@ menuconfig BT | |||
33 | to Bluetooth kernel modules are provided in the BlueZ packages. For | 39 | to Bluetooth kernel modules are provided in the BlueZ packages. For |
34 | more information, see <http://www.bluez.org/>. | 40 | more information, see <http://www.bluez.org/>. |
35 | 41 | ||
36 | if BT != n | ||
37 | |||
38 | config BT_L2CAP | ||
39 | bool "L2CAP protocol support" | ||
40 | select CRC16 | ||
41 | select CRYPTO | ||
42 | select CRYPTO_BLKCIPHER | ||
43 | select CRYPTO_AES | ||
44 | select CRYPTO_ECB | ||
45 | help | ||
46 | L2CAP (Logical Link Control and Adaptation Protocol) provides | ||
47 | connection oriented and connection-less data transport. L2CAP | ||
48 | support is required for most Bluetooth applications. | ||
49 | |||
50 | Also included is support for SMP (Security Manager Protocol) which | ||
51 | is the security layer on top of LE (Low Energy) links. | ||
52 | |||
53 | config BT_SCO | ||
54 | bool "SCO links support" | ||
55 | help | ||
56 | SCO link provides voice transport over Bluetooth. SCO support is | ||
57 | required for voice applications like Headset and Audio. | ||
58 | |||
59 | endif | ||
60 | |||
61 | source "net/bluetooth/rfcomm/Kconfig" | 42 | source "net/bluetooth/rfcomm/Kconfig" |
62 | 43 | ||
63 | source "net/bluetooth/bnep/Kconfig" | 44 | source "net/bluetooth/bnep/Kconfig" |
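The selects added to the top-level BT option above pull in the AES and ECB crypto code because SMP, which is now always built into bluetooth.ko, allocates an "ecb(aes)" cipher. A minimal sketch of that allocation with the blkcipher API of this kernel generation follows; the function name is made up and error handling is trimmed.

#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_blkcipher *demo_alloc_aes_ecb(void)
{
	struct crypto_blkcipher *tfm;

	/* Only succeeds when CRYPTO, CRYPTO_BLKCIPHER, CRYPTO_AES and
	 * CRYPTO_ECB are available, which is what the new selects ensure. */
	tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return NULL;

	return tfm;
}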
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 9b67f3d08fa4..2dc5a5700f53 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -8,6 +8,5 @@ obj-$(CONFIG_BT_BNEP) += bnep/ | |||
8 | obj-$(CONFIG_BT_CMTP) += cmtp/ | 8 | obj-$(CONFIG_BT_CMTP) += cmtp/ |
9 | obj-$(CONFIG_BT_HIDP) += hidp/ | 9 | obj-$(CONFIG_BT_HIDP) += hidp/ |
10 | 10 | ||
11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o | 11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ |
12 | bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o | 12 | hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o |
13 | bluetooth-$(CONFIG_BT_SCO) += sco.o | ||
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 062124cd89cf..cdcfcabb34ab 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -199,15 +199,14 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) | |||
199 | 199 | ||
200 | BT_DBG("parent %p", parent); | 200 | BT_DBG("parent %p", parent); |
201 | 201 | ||
202 | local_bh_disable(); | ||
203 | list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { | 202 | list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { |
204 | sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); | 203 | sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); |
205 | 204 | ||
206 | bh_lock_sock(sk); | 205 | lock_sock(sk); |
207 | 206 | ||
208 | /* FIXME: Is this check still needed */ | 207 | /* FIXME: Is this check still needed */ |
209 | if (sk->sk_state == BT_CLOSED) { | 208 | if (sk->sk_state == BT_CLOSED) { |
210 | bh_unlock_sock(sk); | 209 | release_sock(sk); |
211 | bt_accept_unlink(sk); | 210 | bt_accept_unlink(sk); |
212 | continue; | 211 | continue; |
213 | } | 212 | } |
@@ -218,14 +217,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) | |||
218 | if (newsock) | 217 | if (newsock) |
219 | sock_graft(sk, newsock); | 218 | sock_graft(sk, newsock); |
220 | 219 | ||
221 | bh_unlock_sock(sk); | 220 | release_sock(sk); |
222 | local_bh_enable(); | ||
223 | return sk; | 221 | return sk; |
224 | } | 222 | } |
225 | 223 | ||
226 | bh_unlock_sock(sk); | 224 | release_sock(sk); |
227 | } | 225 | } |
228 | local_bh_enable(); | ||
229 | 226 | ||
230 | return NULL; | 227 | return NULL; |
231 | } | 228 | } |
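The accept-queue hunk above swaps bh_lock_sock()/local_bh_disable() for lock_sock()/release_sock(), which is only legal because the caller now runs in process context (workqueues) instead of a tasklet. A rough sketch of the pattern, using a hypothetical helper that is not part of the patch:

#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/sock.h>

static bool demo_sock_is_dead(struct sock *sk)
{
	bool dead;

	lock_sock(sk);			/* may sleep; process context only */
	dead = (sk->sk_state == BT_CLOSED);
	release_sock(sk);		/* wakes anyone waiting for the lock */

	return dead;
}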
diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig
index 35158b036d54..71791fc9f6b1 100644
--- a/net/bluetooth/bnep/Kconfig
+++ b/net/bluetooth/bnep/Kconfig
@@ -1,6 +1,6 @@ | |||
1 | config BT_BNEP | 1 | config BT_BNEP |
2 | tristate "BNEP protocol support" | 2 | tristate "BNEP protocol support" |
3 | depends on BT && BT_L2CAP | 3 | depends on BT |
4 | select CRC32 | 4 | select CRC32 |
5 | help | 5 | help |
6 | BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet | 6 | BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1eea8208b2cc..a779ec703323 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -56,8 +56,8 @@ | |||
56 | 56 | ||
57 | #define VERSION "1.3" | 57 | #define VERSION "1.3" |
58 | 58 | ||
59 | static int compress_src = 1; | 59 | static bool compress_src = true; |
60 | static int compress_dst = 1; | 60 | static bool compress_dst = true; |
61 | 61 | ||
62 | static LIST_HEAD(bnep_session_list); | 62 | static LIST_HEAD(bnep_session_list); |
63 | static DECLARE_RWSEM(bnep_session_sem); | 63 | static DECLARE_RWSEM(bnep_session_sem); |
@@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem); | |||
65 | static struct bnep_session *__bnep_get_session(u8 *dst) | 65 | static struct bnep_session *__bnep_get_session(u8 *dst) |
66 | { | 66 | { |
67 | struct bnep_session *s; | 67 | struct bnep_session *s; |
68 | struct list_head *p; | ||
69 | 68 | ||
70 | BT_DBG(""); | 69 | BT_DBG(""); |
71 | 70 | ||
72 | list_for_each(p, &bnep_session_list) { | 71 | list_for_each_entry(s, &bnep_session_list, list) |
73 | s = list_entry(p, struct bnep_session, list); | ||
74 | if (!compare_ether_addr(dst, s->eh.h_source)) | 72 | if (!compare_ether_addr(dst, s->eh.h_source)) |
75 | return s; | 73 | return s; |
76 | } | 74 | |
77 | return NULL; | 75 | return NULL; |
78 | } | 76 | } |
79 | 77 | ||
@@ -665,17 +663,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) | |||
665 | 663 | ||
666 | int bnep_get_connlist(struct bnep_connlist_req *req) | 664 | int bnep_get_connlist(struct bnep_connlist_req *req) |
667 | { | 665 | { |
668 | struct list_head *p; | 666 | struct bnep_session *s; |
669 | int err = 0, n = 0; | 667 | int err = 0, n = 0; |
670 | 668 | ||
671 | down_read(&bnep_session_sem); | 669 | down_read(&bnep_session_sem); |
672 | 670 | ||
673 | list_for_each(p, &bnep_session_list) { | 671 | list_for_each_entry(s, &bnep_session_list, list) { |
674 | struct bnep_session *s; | ||
675 | struct bnep_conninfo ci; | 672 | struct bnep_conninfo ci; |
676 | 673 | ||
677 | s = list_entry(p, struct bnep_session, list); | ||
678 | |||
679 | __bnep_copy_ci(&ci, s); | 674 | __bnep_copy_ci(&ci, s); |
680 | 675 | ||
681 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { | 676 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { |
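Several hunks in this commit (bnep, cmtp, hci_core, hci_conn) replace open-coded list_for_each() plus list_entry() with list_for_each_entry(). A toy version of the lookup above, using a made-up struct rather than struct bnep_session:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>

struct demo_session {
	struct list_head list;
	u8 addr[6];
};

static LIST_HEAD(demo_list);

static struct demo_session *demo_find(const u8 *addr)
{
	struct demo_session *s;

	/* The iterator variable is the entry itself; no list_entry() step. */
	list_for_each_entry(s, &demo_list, list)
		if (!memcmp(addr, s->addr, 6))
			return s;

	return NULL;
}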
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index d6b0382f6f3a..94cbf42ce155 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,6 +1,6 @@ | |||
1 | config BT_CMTP | 1 | config BT_CMTP |
2 | tristate "CMTP protocol support" | 2 | tristate "CMTP protocol support" |
3 | depends on BT && BT_L2CAP && ISDN_CAPI | 3 | depends on BT && ISDN_CAPI |
4 | help | 4 | help |
5 | CMTP (CAPI Message Transport Protocol) is a transport layer | 5 | CMTP (CAPI Message Transport Protocol) is a transport layer |
6 | for CAPI messages. CMTP is required for the Bluetooth Common | 6 | for CAPI messages. CMTP is required for the Bluetooth Common |
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 5a6e634f7fca..6c9c1fd601ca 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list); | |||
53 | static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) | 53 | static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) |
54 | { | 54 | { |
55 | struct cmtp_session *session; | 55 | struct cmtp_session *session; |
56 | struct list_head *p; | ||
57 | 56 | ||
58 | BT_DBG(""); | 57 | BT_DBG(""); |
59 | 58 | ||
60 | list_for_each(p, &cmtp_session_list) { | 59 | list_for_each_entry(session, &cmtp_session_list, list) |
61 | session = list_entry(p, struct cmtp_session, list); | ||
62 | if (!bacmp(bdaddr, &session->bdaddr)) | 60 | if (!bacmp(bdaddr, &session->bdaddr)) |
63 | return session; | 61 | return session; |
64 | } | 62 | |
65 | return NULL; | 63 | return NULL; |
66 | } | 64 | } |
67 | 65 | ||
@@ -432,19 +430,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) | |||
432 | 430 | ||
433 | int cmtp_get_connlist(struct cmtp_connlist_req *req) | 431 | int cmtp_get_connlist(struct cmtp_connlist_req *req) |
434 | { | 432 | { |
435 | struct list_head *p; | 433 | struct cmtp_session *session; |
436 | int err = 0, n = 0; | 434 | int err = 0, n = 0; |
437 | 435 | ||
438 | BT_DBG(""); | 436 | BT_DBG(""); |
439 | 437 | ||
440 | down_read(&cmtp_session_sem); | 438 | down_read(&cmtp_session_sem); |
441 | 439 | ||
442 | list_for_each(p, &cmtp_session_list) { | 440 | list_for_each_entry(session, &cmtp_session_list, list) { |
443 | struct cmtp_session *session; | ||
444 | struct cmtp_conninfo ci; | 441 | struct cmtp_conninfo ci; |
445 | 442 | ||
446 | session = list_entry(p, struct cmtp_session, list); | ||
447 | |||
448 | __cmtp_copy_session(session, &ci); | 443 | __cmtp_copy_session(session, &ci); |
449 | 444 | ||
450 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { | 445 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { |
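The cmtp connlist helpers above keep relying on a reader/writer semaphore around the session list; only the iteration style changes. A stand-alone sketch of that locking pattern with invented names:

#include <linux/list.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);
static LIST_HEAD(demo_sessions);

static int demo_count_sessions(void)
{
	struct list_head *p;
	int n = 0;

	down_read(&demo_sem);		/* many readers may hold this at once */
	list_for_each(p, &demo_sessions)
		n++;
	up_read(&demo_sem);		/* writers pair down_write()/up_write() */

	return n;
}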
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c1c597e3e198..3db432473ad5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn) | |||
123 | 123 | ||
124 | BT_DBG("%p", conn); | 124 | BT_DBG("%p", conn); |
125 | 125 | ||
126 | if (conn->hdev->hci_ver < 2) | 126 | if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2) |
127 | return; | 127 | return; |
128 | 128 | ||
129 | bacpy(&cp.bdaddr, &conn->dst); | 129 | bacpy(&cp.bdaddr, &conn->dst); |
@@ -275,9 +275,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) | |||
275 | } | 275 | } |
276 | } | 276 | } |
277 | 277 | ||
278 | static void hci_conn_timeout(unsigned long arg) | 278 | static void hci_conn_timeout(struct work_struct *work) |
279 | { | 279 | { |
280 | struct hci_conn *conn = (void *) arg; | 280 | struct hci_conn *conn = container_of(work, struct hci_conn, |
281 | disc_work.work); | ||
281 | struct hci_dev *hdev = conn->hdev; | 282 | struct hci_dev *hdev = conn->hdev; |
282 | __u8 reason; | 283 | __u8 reason; |
283 | 284 | ||
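hci_conn_timeout() above is converted from a timer callback to a delayed work item; the owning hci_conn is recovered with container_of() on the embedded work_struct. A minimal sketch of the same pattern on a made-up structure:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_conn {
	struct delayed_work disc_work;
};

static void demo_timeout(struct work_struct *work)
{
	/* work points at disc_work.work; recover the containing object */
	struct demo_conn *conn = container_of(work, struct demo_conn,
					      disc_work.work);
	(void)conn;	/* tear the connection down here */
}

static void demo_init(struct demo_conn *conn)
{
	INIT_DELAYED_WORK(&conn->disc_work, demo_timeout);
	/* later: schedule_delayed_work(&conn->disc_work, HZ);
	 *        cancel_delayed_work_sync(&conn->disc_work); */
}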
@@ -311,6 +312,42 @@ static void hci_conn_timeout(unsigned long arg) | |||
311 | hci_dev_unlock(hdev); | 312 | hci_dev_unlock(hdev); |
312 | } | 313 | } |
313 | 314 | ||
315 | /* Enter sniff mode */ | ||
316 | static void hci_conn_enter_sniff_mode(struct hci_conn *conn) | ||
317 | { | ||
318 | struct hci_dev *hdev = conn->hdev; | ||
319 | |||
320 | BT_DBG("conn %p mode %d", conn, conn->mode); | ||
321 | |||
322 | if (test_bit(HCI_RAW, &hdev->flags)) | ||
323 | return; | ||
324 | |||
325 | if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) | ||
326 | return; | ||
327 | |||
328 | if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) | ||
329 | return; | ||
330 | |||
331 | if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { | ||
332 | struct hci_cp_sniff_subrate cp; | ||
333 | cp.handle = cpu_to_le16(conn->handle); | ||
334 | cp.max_latency = cpu_to_le16(0); | ||
335 | cp.min_remote_timeout = cpu_to_le16(0); | ||
336 | cp.min_local_timeout = cpu_to_le16(0); | ||
337 | hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); | ||
338 | } | ||
339 | |||
340 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
341 | struct hci_cp_sniff_mode cp; | ||
342 | cp.handle = cpu_to_le16(conn->handle); | ||
343 | cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); | ||
344 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); | ||
345 | cp.attempt = cpu_to_le16(4); | ||
346 | cp.timeout = cpu_to_le16(1); | ||
347 | hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); | ||
348 | } | ||
349 | } | ||
350 | |||
314 | static void hci_conn_idle(unsigned long arg) | 351 | static void hci_conn_idle(unsigned long arg) |
315 | { | 352 | { |
316 | struct hci_conn *conn = (void *) arg; | 353 | struct hci_conn *conn = (void *) arg; |
@@ -325,12 +362,8 @@ static void hci_conn_auto_accept(unsigned long arg) | |||
325 | struct hci_conn *conn = (void *) arg; | 362 | struct hci_conn *conn = (void *) arg; |
326 | struct hci_dev *hdev = conn->hdev; | 363 | struct hci_dev *hdev = conn->hdev; |
327 | 364 | ||
328 | hci_dev_lock(hdev); | ||
329 | |||
330 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), | 365 | hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), |
331 | &conn->dst); | 366 | &conn->dst); |
332 | |||
333 | hci_dev_unlock(hdev); | ||
334 | } | 367 | } |
335 | 368 | ||
336 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | 369 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) |
@@ -374,7 +407,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
374 | 407 | ||
375 | skb_queue_head_init(&conn->data_q); | 408 | skb_queue_head_init(&conn->data_q); |
376 | 409 | ||
377 | setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); | 410 | INIT_LIST_HEAD(&conn->chan_list);; |
411 | |||
412 | INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); | ||
378 | setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); | 413 | setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); |
379 | setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, | 414 | setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, |
380 | (unsigned long) conn); | 415 | (unsigned long) conn); |
@@ -383,8 +418,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
383 | 418 | ||
384 | hci_dev_hold(hdev); | 419 | hci_dev_hold(hdev); |
385 | 420 | ||
386 | tasklet_disable(&hdev->tx_task); | ||
387 | |||
388 | hci_conn_hash_add(hdev, conn); | 421 | hci_conn_hash_add(hdev, conn); |
389 | if (hdev->notify) | 422 | if (hdev->notify) |
390 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); | 423 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); |
@@ -393,8 +426,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
393 | 426 | ||
394 | hci_conn_init_sysfs(conn); | 427 | hci_conn_init_sysfs(conn); |
395 | 428 | ||
396 | tasklet_enable(&hdev->tx_task); | ||
397 | |||
398 | return conn; | 429 | return conn; |
399 | } | 430 | } |
400 | 431 | ||
@@ -406,7 +437,7 @@ int hci_conn_del(struct hci_conn *conn) | |||
406 | 437 | ||
407 | del_timer(&conn->idle_timer); | 438 | del_timer(&conn->idle_timer); |
408 | 439 | ||
409 | del_timer(&conn->disc_timer); | 440 | cancel_delayed_work_sync(&conn->disc_work); |
410 | 441 | ||
411 | del_timer(&conn->auto_accept_timer); | 442 | del_timer(&conn->auto_accept_timer); |
412 | 443 | ||
@@ -430,14 +461,13 @@ int hci_conn_del(struct hci_conn *conn) | |||
430 | } | 461 | } |
431 | } | 462 | } |
432 | 463 | ||
433 | tasklet_disable(&hdev->tx_task); | 464 | |
465 | hci_chan_list_flush(conn); | ||
434 | 466 | ||
435 | hci_conn_hash_del(hdev, conn); | 467 | hci_conn_hash_del(hdev, conn); |
436 | if (hdev->notify) | 468 | if (hdev->notify) |
437 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); | 469 | hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); |
438 | 470 | ||
439 | tasklet_enable(&hdev->tx_task); | ||
440 | |||
441 | skb_queue_purge(&conn->data_q); | 471 | skb_queue_purge(&conn->data_q); |
442 | 472 | ||
443 | hci_conn_put_device(conn); | 473 | hci_conn_put_device(conn); |
@@ -453,16 +483,13 @@ int hci_conn_del(struct hci_conn *conn) | |||
453 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | 483 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) |
454 | { | 484 | { |
455 | int use_src = bacmp(src, BDADDR_ANY); | 485 | int use_src = bacmp(src, BDADDR_ANY); |
456 | struct hci_dev *hdev = NULL; | 486 | struct hci_dev *hdev = NULL, *d; |
457 | struct list_head *p; | ||
458 | 487 | ||
459 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); | 488 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); |
460 | 489 | ||
461 | read_lock_bh(&hci_dev_list_lock); | 490 | read_lock(&hci_dev_list_lock); |
462 | |||
463 | list_for_each(p, &hci_dev_list) { | ||
464 | struct hci_dev *d = list_entry(p, struct hci_dev, list); | ||
465 | 491 | ||
492 | list_for_each_entry(d, &hci_dev_list, list) { | ||
466 | if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) | 493 | if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) |
467 | continue; | 494 | continue; |
468 | 495 | ||
@@ -485,7 +512,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | |||
485 | if (hdev) | 512 | if (hdev) |
486 | hdev = hci_dev_hold(hdev); | 513 | hdev = hci_dev_hold(hdev); |
487 | 514 | ||
488 | read_unlock_bh(&hci_dev_list_lock); | 515 | read_unlock(&hci_dev_list_lock); |
489 | return hdev; | 516 | return hdev; |
490 | } | 517 | } |
491 | EXPORT_SYMBOL(hci_get_route); | 518 | EXPORT_SYMBOL(hci_get_route); |
@@ -766,60 +793,18 @@ timer: | |||
766 | jiffies + msecs_to_jiffies(hdev->idle_timeout)); | 793 | jiffies + msecs_to_jiffies(hdev->idle_timeout)); |
767 | } | 794 | } |
768 | 795 | ||
769 | /* Enter sniff mode */ | ||
770 | void hci_conn_enter_sniff_mode(struct hci_conn *conn) | ||
771 | { | ||
772 | struct hci_dev *hdev = conn->hdev; | ||
773 | |||
774 | BT_DBG("conn %p mode %d", conn, conn->mode); | ||
775 | |||
776 | if (test_bit(HCI_RAW, &hdev->flags)) | ||
777 | return; | ||
778 | |||
779 | if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) | ||
780 | return; | ||
781 | |||
782 | if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) | ||
783 | return; | ||
784 | |||
785 | if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { | ||
786 | struct hci_cp_sniff_subrate cp; | ||
787 | cp.handle = cpu_to_le16(conn->handle); | ||
788 | cp.max_latency = cpu_to_le16(0); | ||
789 | cp.min_remote_timeout = cpu_to_le16(0); | ||
790 | cp.min_local_timeout = cpu_to_le16(0); | ||
791 | hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); | ||
792 | } | ||
793 | |||
794 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { | ||
795 | struct hci_cp_sniff_mode cp; | ||
796 | cp.handle = cpu_to_le16(conn->handle); | ||
797 | cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); | ||
798 | cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); | ||
799 | cp.attempt = cpu_to_le16(4); | ||
800 | cp.timeout = cpu_to_le16(1); | ||
801 | hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); | ||
802 | } | ||
803 | } | ||
804 | |||
805 | /* Drop all connection on the device */ | 796 | /* Drop all connection on the device */ |
806 | void hci_conn_hash_flush(struct hci_dev *hdev) | 797 | void hci_conn_hash_flush(struct hci_dev *hdev) |
807 | { | 798 | { |
808 | struct hci_conn_hash *h = &hdev->conn_hash; | 799 | struct hci_conn_hash *h = &hdev->conn_hash; |
809 | struct list_head *p; | 800 | struct hci_conn *c; |
810 | 801 | ||
811 | BT_DBG("hdev %s", hdev->name); | 802 | BT_DBG("hdev %s", hdev->name); |
812 | 803 | ||
813 | p = h->list.next; | 804 | list_for_each_entry_rcu(c, &h->list, list) { |
814 | while (p != &h->list) { | ||
815 | struct hci_conn *c; | ||
816 | |||
817 | c = list_entry(p, struct hci_conn, list); | ||
818 | p = p->next; | ||
819 | |||
820 | c->state = BT_CLOSED; | 805 | c->state = BT_CLOSED; |
821 | 806 | ||
822 | hci_proto_disconn_cfm(c, 0x16); | 807 | hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); |
823 | hci_conn_del(c); | 808 | hci_conn_del(c); |
824 | } | 809 | } |
825 | } | 810 | } |
@@ -855,10 +840,10 @@ EXPORT_SYMBOL(hci_conn_put_device); | |||
855 | 840 | ||
856 | int hci_get_conn_list(void __user *arg) | 841 | int hci_get_conn_list(void __user *arg) |
857 | { | 842 | { |
843 | register struct hci_conn *c; | ||
858 | struct hci_conn_list_req req, *cl; | 844 | struct hci_conn_list_req req, *cl; |
859 | struct hci_conn_info *ci; | 845 | struct hci_conn_info *ci; |
860 | struct hci_dev *hdev; | 846 | struct hci_dev *hdev; |
861 | struct list_head *p; | ||
862 | int n = 0, size, err; | 847 | int n = 0, size, err; |
863 | 848 | ||
864 | if (copy_from_user(&req, arg, sizeof(req))) | 849 | if (copy_from_user(&req, arg, sizeof(req))) |
@@ -881,11 +866,8 @@ int hci_get_conn_list(void __user *arg) | |||
881 | 866 | ||
882 | ci = cl->conn_info; | 867 | ci = cl->conn_info; |
883 | 868 | ||
884 | hci_dev_lock_bh(hdev); | 869 | hci_dev_lock(hdev); |
885 | list_for_each(p, &hdev->conn_hash.list) { | 870 | list_for_each_entry(c, &hdev->conn_hash.list, list) { |
886 | register struct hci_conn *c; | ||
887 | c = list_entry(p, struct hci_conn, list); | ||
888 | |||
889 | bacpy(&(ci + n)->bdaddr, &c->dst); | 871 | bacpy(&(ci + n)->bdaddr, &c->dst); |
890 | (ci + n)->handle = c->handle; | 872 | (ci + n)->handle = c->handle; |
891 | (ci + n)->type = c->type; | 873 | (ci + n)->type = c->type; |
@@ -895,7 +877,7 @@ int hci_get_conn_list(void __user *arg) | |||
895 | if (++n >= req.conn_num) | 877 | if (++n >= req.conn_num) |
896 | break; | 878 | break; |
897 | } | 879 | } |
898 | hci_dev_unlock_bh(hdev); | 880 | hci_dev_unlock(hdev); |
899 | 881 | ||
900 | cl->dev_id = hdev->id; | 882 | cl->dev_id = hdev->id; |
901 | cl->conn_num = n; | 883 | cl->conn_num = n; |
@@ -919,7 +901,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) | |||
919 | if (copy_from_user(&req, arg, sizeof(req))) | 901 | if (copy_from_user(&req, arg, sizeof(req))) |
920 | return -EFAULT; | 902 | return -EFAULT; |
921 | 903 | ||
922 | hci_dev_lock_bh(hdev); | 904 | hci_dev_lock(hdev); |
923 | conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); | 905 | conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); |
924 | if (conn) { | 906 | if (conn) { |
925 | bacpy(&ci.bdaddr, &conn->dst); | 907 | bacpy(&ci.bdaddr, &conn->dst); |
@@ -929,7 +911,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) | |||
929 | ci.state = conn->state; | 911 | ci.state = conn->state; |
930 | ci.link_mode = conn->link_mode; | 912 | ci.link_mode = conn->link_mode; |
931 | } | 913 | } |
932 | hci_dev_unlock_bh(hdev); | 914 | hci_dev_unlock(hdev); |
933 | 915 | ||
934 | if (!conn) | 916 | if (!conn) |
935 | return -ENOENT; | 917 | return -ENOENT; |
@@ -945,14 +927,60 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) | |||
945 | if (copy_from_user(&req, arg, sizeof(req))) | 927 | if (copy_from_user(&req, arg, sizeof(req))) |
946 | return -EFAULT; | 928 | return -EFAULT; |
947 | 929 | ||
948 | hci_dev_lock_bh(hdev); | 930 | hci_dev_lock(hdev); |
949 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); | 931 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); |
950 | if (conn) | 932 | if (conn) |
951 | req.type = conn->auth_type; | 933 | req.type = conn->auth_type; |
952 | hci_dev_unlock_bh(hdev); | 934 | hci_dev_unlock(hdev); |
953 | 935 | ||
954 | if (!conn) | 936 | if (!conn) |
955 | return -ENOENT; | 937 | return -ENOENT; |
956 | 938 | ||
957 | return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; | 939 | return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; |
958 | } | 940 | } |
941 | |||
942 | struct hci_chan *hci_chan_create(struct hci_conn *conn) | ||
943 | { | ||
944 | struct hci_dev *hdev = conn->hdev; | ||
945 | struct hci_chan *chan; | ||
946 | |||
947 | BT_DBG("%s conn %p", hdev->name, conn); | ||
948 | |||
949 | chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC); | ||
950 | if (!chan) | ||
951 | return NULL; | ||
952 | |||
953 | chan->conn = conn; | ||
954 | skb_queue_head_init(&chan->data_q); | ||
955 | |||
956 | list_add_rcu(&chan->list, &conn->chan_list); | ||
957 | |||
958 | return chan; | ||
959 | } | ||
960 | |||
961 | int hci_chan_del(struct hci_chan *chan) | ||
962 | { | ||
963 | struct hci_conn *conn = chan->conn; | ||
964 | struct hci_dev *hdev = conn->hdev; | ||
965 | |||
966 | BT_DBG("%s conn %p chan %p", hdev->name, conn, chan); | ||
967 | |||
968 | list_del_rcu(&chan->list); | ||
969 | |||
970 | synchronize_rcu(); | ||
971 | |||
972 | skb_queue_purge(&chan->data_q); | ||
973 | kfree(chan); | ||
974 | |||
975 | return 0; | ||
976 | } | ||
977 | |||
978 | void hci_chan_list_flush(struct hci_conn *conn) | ||
979 | { | ||
980 | struct hci_chan *chan; | ||
981 | |||
982 | BT_DBG("conn %p", conn); | ||
983 | |||
984 | list_for_each_entry_rcu(chan, &conn->chan_list, list) | ||
985 | hci_chan_del(chan); | ||
986 | } | ||
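The new hci_chan helpers keep the per-connection channel list consistent with RCU: writers use list_add_rcu()/list_del_rcu() and wait out readers with synchronize_rcu() before freeing. A compact sketch with invented names:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_chan {
	struct list_head list;
};

static void demo_chan_add(struct list_head *chan_list, struct demo_chan *chan)
{
	list_add_rcu(&chan->list, chan_list);	/* readers may run in parallel */
}

static void demo_chan_del(struct demo_chan *chan)
{
	list_del_rcu(&chan->list);
	synchronize_rcu();			/* wait for current readers */
	kfree(chan);				/* now safe to free */
}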
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b84458dcc226..845da3ee56a0 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux | 2 | BlueZ - Bluetooth protocol stack for Linux |
3 | Copyright (C) 2000-2001 Qualcomm Incorporated | 3 | Copyright (C) 2000-2001 Qualcomm Incorporated |
4 | Copyright (C) 2011 ProFUSION Embedded Systems | ||
4 | 5 | ||
5 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> | 6 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
6 | 7 | ||
@@ -54,11 +55,11 @@ | |||
54 | 55 | ||
55 | #define AUTO_OFF_TIMEOUT 2000 | 56 | #define AUTO_OFF_TIMEOUT 2000 |
56 | 57 | ||
57 | static void hci_cmd_task(unsigned long arg); | 58 | int enable_hs; |
58 | static void hci_rx_task(unsigned long arg); | ||
59 | static void hci_tx_task(unsigned long arg); | ||
60 | 59 | ||
61 | static DEFINE_RWLOCK(hci_task_lock); | 60 | static void hci_rx_work(struct work_struct *work); |
61 | static void hci_cmd_work(struct work_struct *work); | ||
62 | static void hci_tx_work(struct work_struct *work); | ||
62 | 63 | ||
63 | /* HCI device list */ | 64 | /* HCI device list */ |
64 | LIST_HEAD(hci_dev_list); | 65 | LIST_HEAD(hci_dev_list); |
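The declarations above replace the rx/tx/cmd tasklets with work items. The usual shape of such a conversion, shown on a hypothetical device structure rather than struct hci_dev:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct rx_work;
	struct workqueue_struct *workqueue;
};

static void demo_rx_work(struct work_struct *work)
{
	struct demo_dev *hdev = container_of(work, struct demo_dev, rx_work);

	(void)hdev;	/* drain the receive queue in process context */
}

static void demo_start(struct demo_dev *hdev)
{
	INIT_WORK(&hdev->rx_work, demo_rx_work);
	queue_work(hdev->workqueue, &hdev->rx_work);
}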
@@ -68,10 +69,6 @@ DEFINE_RWLOCK(hci_dev_list_lock); | |||
68 | LIST_HEAD(hci_cb_list); | 69 | LIST_HEAD(hci_cb_list); |
69 | DEFINE_RWLOCK(hci_cb_list_lock); | 70 | DEFINE_RWLOCK(hci_cb_list_lock); |
70 | 71 | ||
71 | /* HCI protocols */ | ||
72 | #define HCI_MAX_PROTO 2 | ||
73 | struct hci_proto *hci_proto[HCI_MAX_PROTO]; | ||
74 | |||
75 | /* HCI notifiers list */ | 72 | /* HCI notifiers list */ |
76 | static ATOMIC_NOTIFIER_HEAD(hci_notifier); | 73 | static ATOMIC_NOTIFIER_HEAD(hci_notifier); |
77 | 74 | ||
@@ -190,33 +187,20 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) | |||
190 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); | 187 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); |
191 | } | 188 | } |
192 | 189 | ||
193 | static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | 190 | static void bredr_init(struct hci_dev *hdev) |
194 | { | 191 | { |
195 | struct hci_cp_delete_stored_link_key cp; | 192 | struct hci_cp_delete_stored_link_key cp; |
196 | struct sk_buff *skb; | ||
197 | __le16 param; | 193 | __le16 param; |
198 | __u8 flt_type; | 194 | __u8 flt_type; |
199 | 195 | ||
200 | BT_DBG("%s %ld", hdev->name, opt); | 196 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; |
201 | |||
202 | /* Driver initialization */ | ||
203 | |||
204 | /* Special commands */ | ||
205 | while ((skb = skb_dequeue(&hdev->driver_init))) { | ||
206 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; | ||
207 | skb->dev = (void *) hdev; | ||
208 | |||
209 | skb_queue_tail(&hdev->cmd_q, skb); | ||
210 | tasklet_schedule(&hdev->cmd_task); | ||
211 | } | ||
212 | skb_queue_purge(&hdev->driver_init); | ||
213 | 197 | ||
214 | /* Mandatory initialization */ | 198 | /* Mandatory initialization */ |
215 | 199 | ||
216 | /* Reset */ | 200 | /* Reset */ |
217 | if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { | 201 | if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { |
218 | set_bit(HCI_RESET, &hdev->flags); | 202 | set_bit(HCI_RESET, &hdev->flags); |
219 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); | 203 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); |
220 | } | 204 | } |
221 | 205 | ||
222 | /* Read Local Supported Features */ | 206 | /* Read Local Supported Features */ |
@@ -228,18 +212,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | |||
228 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | 212 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ |
229 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); | 213 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); |
230 | 214 | ||
231 | #if 0 | ||
232 | /* Host buffer size */ | ||
233 | { | ||
234 | struct hci_cp_host_buffer_size cp; | ||
235 | cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE); | ||
236 | cp.sco_mtu = HCI_MAX_SCO_SIZE; | ||
237 | cp.acl_max_pkt = cpu_to_le16(0xffff); | ||
238 | cp.sco_max_pkt = cpu_to_le16(0xffff); | ||
239 | hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); | ||
240 | } | ||
241 | #endif | ||
242 | |||
243 | /* Read BD Address */ | 215 | /* Read BD Address */ |
244 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); | 216 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); |
245 | 217 | ||
@@ -267,6 +239,51 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | |||
267 | hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); | 239 | hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); |
268 | } | 240 | } |
269 | 241 | ||
242 | static void amp_init(struct hci_dev *hdev) | ||
243 | { | ||
244 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; | ||
245 | |||
246 | /* Reset */ | ||
247 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); | ||
248 | |||
249 | /* Read Local Version */ | ||
250 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); | ||
251 | } | ||
252 | |||
253 | static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | ||
254 | { | ||
255 | struct sk_buff *skb; | ||
256 | |||
257 | BT_DBG("%s %ld", hdev->name, opt); | ||
258 | |||
259 | /* Driver initialization */ | ||
260 | |||
261 | /* Special commands */ | ||
262 | while ((skb = skb_dequeue(&hdev->driver_init))) { | ||
263 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; | ||
264 | skb->dev = (void *) hdev; | ||
265 | |||
266 | skb_queue_tail(&hdev->cmd_q, skb); | ||
267 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
268 | } | ||
269 | skb_queue_purge(&hdev->driver_init); | ||
270 | |||
271 | switch (hdev->dev_type) { | ||
272 | case HCI_BREDR: | ||
273 | bredr_init(hdev); | ||
274 | break; | ||
275 | |||
276 | case HCI_AMP: | ||
277 | amp_init(hdev); | ||
278 | break; | ||
279 | |||
280 | default: | ||
281 | BT_ERR("Unknown device type %d", hdev->dev_type); | ||
282 | break; | ||
283 | } | ||
284 | |||
285 | } | ||
286 | |||
270 | static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) | 287 | static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) |
271 | { | 288 | { |
272 | BT_DBG("%s", hdev->name); | 289 | BT_DBG("%s", hdev->name); |
@@ -319,8 +336,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) | |||
319 | * Device is held on return. */ | 336 | * Device is held on return. */ |
320 | struct hci_dev *hci_dev_get(int index) | 337 | struct hci_dev *hci_dev_get(int index) |
321 | { | 338 | { |
322 | struct hci_dev *hdev = NULL; | 339 | struct hci_dev *hdev = NULL, *d; |
323 | struct list_head *p; | ||
324 | 340 | ||
325 | BT_DBG("%d", index); | 341 | BT_DBG("%d", index); |
326 | 342 | ||
@@ -328,8 +344,7 @@ struct hci_dev *hci_dev_get(int index) | |||
328 | return NULL; | 344 | return NULL; |
329 | 345 | ||
330 | read_lock(&hci_dev_list_lock); | 346 | read_lock(&hci_dev_list_lock); |
331 | list_for_each(p, &hci_dev_list) { | 347 | list_for_each_entry(d, &hci_dev_list, list) { |
332 | struct hci_dev *d = list_entry(p, struct hci_dev, list); | ||
333 | if (d->id == index) { | 348 | if (d->id == index) { |
334 | hdev = hci_dev_hold(d); | 349 | hdev = hci_dev_hold(d); |
335 | break; | 350 | break; |
@@ -445,14 +460,14 @@ int hci_inquiry(void __user *arg) | |||
445 | if (!hdev) | 460 | if (!hdev) |
446 | return -ENODEV; | 461 | return -ENODEV; |
447 | 462 | ||
448 | hci_dev_lock_bh(hdev); | 463 | hci_dev_lock(hdev); |
449 | if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || | 464 | if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || |
450 | inquiry_cache_empty(hdev) || | 465 | inquiry_cache_empty(hdev) || |
451 | ir.flags & IREQ_CACHE_FLUSH) { | 466 | ir.flags & IREQ_CACHE_FLUSH) { |
452 | inquiry_cache_flush(hdev); | 467 | inquiry_cache_flush(hdev); |
453 | do_inquiry = 1; | 468 | do_inquiry = 1; |
454 | } | 469 | } |
455 | hci_dev_unlock_bh(hdev); | 470 | hci_dev_unlock(hdev); |
456 | 471 | ||
457 | timeo = ir.length * msecs_to_jiffies(2000); | 472 | timeo = ir.length * msecs_to_jiffies(2000); |
458 | 473 | ||
@@ -474,9 +489,9 @@ int hci_inquiry(void __user *arg) | |||
474 | goto done; | 489 | goto done; |
475 | } | 490 | } |
476 | 491 | ||
477 | hci_dev_lock_bh(hdev); | 492 | hci_dev_lock(hdev); |
478 | ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); | 493 | ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); |
479 | hci_dev_unlock_bh(hdev); | 494 | hci_dev_unlock(hdev); |
480 | 495 | ||
481 | BT_DBG("num_rsp %d", ir.num_rsp); | 496 | BT_DBG("num_rsp %d", ir.num_rsp); |
482 | 497 | ||
@@ -523,8 +538,9 @@ int hci_dev_open(__u16 dev) | |||
523 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) | 538 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) |
524 | set_bit(HCI_RAW, &hdev->flags); | 539 | set_bit(HCI_RAW, &hdev->flags); |
525 | 540 | ||
526 | /* Treat all non BR/EDR controllers as raw devices for now */ | 541 | /* Treat all non BR/EDR controllers as raw devices if |
527 | if (hdev->dev_type != HCI_BREDR) | 542 | enable_hs is not set */ |
543 | if (hdev->dev_type != HCI_BREDR && !enable_hs) | ||
528 | set_bit(HCI_RAW, &hdev->flags); | 544 | set_bit(HCI_RAW, &hdev->flags); |
529 | 545 | ||
530 | if (hdev->open(hdev)) { | 546 | if (hdev->open(hdev)) { |
@@ -551,13 +567,16 @@ int hci_dev_open(__u16 dev) | |||
551 | hci_dev_hold(hdev); | 567 | hci_dev_hold(hdev); |
552 | set_bit(HCI_UP, &hdev->flags); | 568 | set_bit(HCI_UP, &hdev->flags); |
553 | hci_notify(hdev, HCI_DEV_UP); | 569 | hci_notify(hdev, HCI_DEV_UP); |
554 | if (!test_bit(HCI_SETUP, &hdev->flags)) | 570 | if (!test_bit(HCI_SETUP, &hdev->flags)) { |
555 | mgmt_powered(hdev->id, 1); | 571 | hci_dev_lock(hdev); |
572 | mgmt_powered(hdev, 1); | ||
573 | hci_dev_unlock(hdev); | ||
574 | } | ||
556 | } else { | 575 | } else { |
557 | /* Init failed, cleanup */ | 576 | /* Init failed, cleanup */ |
558 | tasklet_kill(&hdev->rx_task); | 577 | flush_work(&hdev->tx_work); |
559 | tasklet_kill(&hdev->tx_task); | 578 | flush_work(&hdev->cmd_work); |
560 | tasklet_kill(&hdev->cmd_task); | 579 | flush_work(&hdev->rx_work); |
561 | 580 | ||
562 | skb_queue_purge(&hdev->cmd_q); | 581 | skb_queue_purge(&hdev->cmd_q); |
563 | skb_queue_purge(&hdev->rx_q); | 582 | skb_queue_purge(&hdev->rx_q); |
@@ -593,14 +612,25 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
593 | return 0; | 612 | return 0; |
594 | } | 613 | } |
595 | 614 | ||
596 | /* Kill RX and TX tasks */ | 615 | /* Flush RX and TX works */ |
597 | tasklet_kill(&hdev->rx_task); | 616 | flush_work(&hdev->tx_work); |
598 | tasklet_kill(&hdev->tx_task); | 617 | flush_work(&hdev->rx_work); |
618 | |||
619 | if (hdev->discov_timeout > 0) { | ||
620 | cancel_delayed_work(&hdev->discov_off); | ||
621 | hdev->discov_timeout = 0; | ||
622 | } | ||
599 | 623 | ||
600 | hci_dev_lock_bh(hdev); | 624 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) |
625 | cancel_delayed_work(&hdev->power_off); | ||
626 | |||
627 | if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
628 | cancel_delayed_work(&hdev->service_cache); | ||
629 | |||
630 | hci_dev_lock(hdev); | ||
601 | inquiry_cache_flush(hdev); | 631 | inquiry_cache_flush(hdev); |
602 | hci_conn_hash_flush(hdev); | 632 | hci_conn_hash_flush(hdev); |
603 | hci_dev_unlock_bh(hdev); | 633 | hci_dev_unlock(hdev); |
604 | 634 | ||
605 | hci_notify(hdev, HCI_DEV_DOWN); | 635 | hci_notify(hdev, HCI_DEV_DOWN); |
606 | 636 | ||
@@ -617,8 +647,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
617 | clear_bit(HCI_INIT, &hdev->flags); | 647 | clear_bit(HCI_INIT, &hdev->flags); |
618 | } | 648 | } |
619 | 649 | ||
620 | /* Kill cmd task */ | 650 | /* flush cmd work */ |
621 | tasklet_kill(&hdev->cmd_task); | 651 | flush_work(&hdev->cmd_work); |
622 | 652 | ||
623 | /* Drop queues */ | 653 | /* Drop queues */ |
624 | skb_queue_purge(&hdev->rx_q); | 654 | skb_queue_purge(&hdev->rx_q); |
@@ -636,7 +666,9 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
636 | * and no tasks are scheduled. */ | 666 | * and no tasks are scheduled. */ |
637 | hdev->close(hdev); | 667 | hdev->close(hdev); |
638 | 668 | ||
639 | mgmt_powered(hdev->id, 0); | 669 | hci_dev_lock(hdev); |
670 | mgmt_powered(hdev, 0); | ||
671 | hci_dev_unlock(hdev); | ||
640 | 672 | ||
641 | /* Clear flags */ | 673 | /* Clear flags */ |
642 | hdev->flags = 0; | 674 | hdev->flags = 0; |
@@ -670,7 +702,6 @@ int hci_dev_reset(__u16 dev) | |||
670 | return -ENODEV; | 702 | return -ENODEV; |
671 | 703 | ||
672 | hci_req_lock(hdev); | 704 | hci_req_lock(hdev); |
673 | tasklet_disable(&hdev->tx_task); | ||
674 | 705 | ||
675 | if (!test_bit(HCI_UP, &hdev->flags)) | 706 | if (!test_bit(HCI_UP, &hdev->flags)) |
676 | goto done; | 707 | goto done; |
@@ -679,10 +710,10 @@ int hci_dev_reset(__u16 dev) | |||
679 | skb_queue_purge(&hdev->rx_q); | 710 | skb_queue_purge(&hdev->rx_q); |
680 | skb_queue_purge(&hdev->cmd_q); | 711 | skb_queue_purge(&hdev->cmd_q); |
681 | 712 | ||
682 | hci_dev_lock_bh(hdev); | 713 | hci_dev_lock(hdev); |
683 | inquiry_cache_flush(hdev); | 714 | inquiry_cache_flush(hdev); |
684 | hci_conn_hash_flush(hdev); | 715 | hci_conn_hash_flush(hdev); |
685 | hci_dev_unlock_bh(hdev); | 716 | hci_dev_unlock(hdev); |
686 | 717 | ||
687 | if (hdev->flush) | 718 | if (hdev->flush) |
688 | hdev->flush(hdev); | 719 | hdev->flush(hdev); |
@@ -695,7 +726,6 @@ int hci_dev_reset(__u16 dev) | |||
695 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); | 726 | msecs_to_jiffies(HCI_INIT_TIMEOUT)); |
696 | 727 | ||
697 | done: | 728 | done: |
698 | tasklet_enable(&hdev->tx_task); | ||
699 | hci_req_unlock(hdev); | 729 | hci_req_unlock(hdev); |
700 | hci_dev_put(hdev); | 730 | hci_dev_put(hdev); |
701 | return ret; | 731 | return ret; |
@@ -794,9 +824,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) | |||
794 | 824 | ||
795 | int hci_get_dev_list(void __user *arg) | 825 | int hci_get_dev_list(void __user *arg) |
796 | { | 826 | { |
827 | struct hci_dev *hdev; | ||
797 | struct hci_dev_list_req *dl; | 828 | struct hci_dev_list_req *dl; |
798 | struct hci_dev_req *dr; | 829 | struct hci_dev_req *dr; |
799 | struct list_head *p; | ||
800 | int n = 0, size, err; | 830 | int n = 0, size, err; |
801 | __u16 dev_num; | 831 | __u16 dev_num; |
802 | 832 | ||
@@ -814,13 +844,10 @@ int hci_get_dev_list(void __user *arg) | |||
814 | 844 | ||
815 | dr = dl->dev_req; | 845 | dr = dl->dev_req; |
816 | 846 | ||
817 | read_lock_bh(&hci_dev_list_lock); | 847 | read_lock(&hci_dev_list_lock); |
818 | list_for_each(p, &hci_dev_list) { | 848 | list_for_each_entry(hdev, &hci_dev_list, list) { |
819 | struct hci_dev *hdev; | 849 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) |
820 | 850 | cancel_delayed_work(&hdev->power_off); | |
821 | hdev = list_entry(p, struct hci_dev, list); | ||
822 | |||
823 | hci_del_off_timer(hdev); | ||
824 | 851 | ||
825 | if (!test_bit(HCI_MGMT, &hdev->flags)) | 852 | if (!test_bit(HCI_MGMT, &hdev->flags)) |
826 | set_bit(HCI_PAIRABLE, &hdev->flags); | 853 | set_bit(HCI_PAIRABLE, &hdev->flags); |
@@ -831,7 +858,7 @@ int hci_get_dev_list(void __user *arg) | |||
831 | if (++n >= dev_num) | 858 | if (++n >= dev_num) |
832 | break; | 859 | break; |
833 | } | 860 | } |
834 | read_unlock_bh(&hci_dev_list_lock); | 861 | read_unlock(&hci_dev_list_lock); |
835 | 862 | ||
836 | dl->dev_num = n; | 863 | dl->dev_num = n; |
837 | size = sizeof(*dl) + n * sizeof(*dr); | 864 | size = sizeof(*dl) + n * sizeof(*dr); |
@@ -855,7 +882,8 @@ int hci_get_dev_info(void __user *arg) | |||
855 | if (!hdev) | 882 | if (!hdev) |
856 | return -ENODEV; | 883 | return -ENODEV; |
857 | 884 | ||
858 | hci_del_off_timer(hdev); | 885 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) |
886 | cancel_delayed_work_sync(&hdev->power_off); | ||
859 | 887 | ||
860 | if (!test_bit(HCI_MGMT, &hdev->flags)) | 888 | if (!test_bit(HCI_MGMT, &hdev->flags)) |
861 | set_bit(HCI_PAIRABLE, &hdev->flags); | 889 | set_bit(HCI_PAIRABLE, &hdev->flags); |
@@ -912,6 +940,7 @@ struct hci_dev *hci_alloc_dev(void) | |||
912 | if (!hdev) | 940 | if (!hdev) |
913 | return NULL; | 941 | return NULL; |
914 | 942 | ||
943 | hci_init_sysfs(hdev); | ||
915 | skb_queue_head_init(&hdev->driver_init); | 944 | skb_queue_head_init(&hdev->driver_init); |
916 | 945 | ||
917 | return hdev; | 946 | return hdev; |
@@ -938,39 +967,41 @@ static void hci_power_on(struct work_struct *work) | |||
938 | return; | 967 | return; |
939 | 968 | ||
940 | if (test_bit(HCI_AUTO_OFF, &hdev->flags)) | 969 | if (test_bit(HCI_AUTO_OFF, &hdev->flags)) |
941 | mod_timer(&hdev->off_timer, | 970 | schedule_delayed_work(&hdev->power_off, |
942 | jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); | 971 | msecs_to_jiffies(AUTO_OFF_TIMEOUT)); |
943 | 972 | ||
944 | if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) | 973 | if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) |
945 | mgmt_index_added(hdev->id); | 974 | mgmt_index_added(hdev); |
946 | } | 975 | } |
947 | 976 | ||
948 | static void hci_power_off(struct work_struct *work) | 977 | static void hci_power_off(struct work_struct *work) |
949 | { | 978 | { |
950 | struct hci_dev *hdev = container_of(work, struct hci_dev, power_off); | 979 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
980 | power_off.work); | ||
951 | 981 | ||
952 | BT_DBG("%s", hdev->name); | 982 | BT_DBG("%s", hdev->name); |
953 | 983 | ||
984 | clear_bit(HCI_AUTO_OFF, &hdev->flags); | ||
985 | |||
954 | hci_dev_close(hdev->id); | 986 | hci_dev_close(hdev->id); |
955 | } | 987 | } |
956 | 988 | ||
957 | static void hci_auto_off(unsigned long data) | 989 | static void hci_discov_off(struct work_struct *work) |
958 | { | 990 | { |
959 | struct hci_dev *hdev = (struct hci_dev *) data; | 991 | struct hci_dev *hdev; |
992 | u8 scan = SCAN_PAGE; | ||
993 | |||
994 | hdev = container_of(work, struct hci_dev, discov_off.work); | ||
960 | 995 | ||
961 | BT_DBG("%s", hdev->name); | 996 | BT_DBG("%s", hdev->name); |
962 | 997 | ||
963 | clear_bit(HCI_AUTO_OFF, &hdev->flags); | 998 | hci_dev_lock(hdev); |
964 | 999 | ||
965 | queue_work(hdev->workqueue, &hdev->power_off); | 1000 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); |
966 | } | ||
967 | 1001 | ||
968 | void hci_del_off_timer(struct hci_dev *hdev) | 1002 | hdev->discov_timeout = 0; |
969 | { | ||
970 | BT_DBG("%s", hdev->name); | ||
971 | 1003 | ||
972 | clear_bit(HCI_AUTO_OFF, &hdev->flags); | 1004 | hci_dev_unlock(hdev); |
973 | del_timer(&hdev->off_timer); | ||
974 | } | 1005 | } |
975 | 1006 | ||
976 | int hci_uuids_clear(struct hci_dev *hdev) | 1007 | int hci_uuids_clear(struct hci_dev *hdev) |
@@ -1007,16 +1038,11 @@ int hci_link_keys_clear(struct hci_dev *hdev) | |||
1007 | 1038 | ||
1008 | struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) | 1039 | struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) |
1009 | { | 1040 | { |
1010 | struct list_head *p; | 1041 | struct link_key *k; |
1011 | |||
1012 | list_for_each(p, &hdev->link_keys) { | ||
1013 | struct link_key *k; | ||
1014 | |||
1015 | k = list_entry(p, struct link_key, list); | ||
1016 | 1042 | ||
1043 | list_for_each_entry(k, &hdev->link_keys, list) | ||
1017 | if (bacmp(bdaddr, &k->bdaddr) == 0) | 1044 | if (bacmp(bdaddr, &k->bdaddr) == 0) |
1018 | return k; | 1045 | return k; |
1019 | } | ||
1020 | 1046 | ||
1021 | return NULL; | 1047 | return NULL; |
1022 | } | 1048 | } |
@@ -1138,7 +1164,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, | |||
1138 | 1164 | ||
1139 | persistent = hci_persistent_key(hdev, conn, type, old_key_type); | 1165 | persistent = hci_persistent_key(hdev, conn, type, old_key_type); |
1140 | 1166 | ||
1141 | mgmt_new_key(hdev->id, key, persistent); | 1167 | mgmt_new_link_key(hdev, key, persistent); |
1142 | 1168 | ||
1143 | if (!persistent) { | 1169 | if (!persistent) { |
1144 | list_del(&key->list); | 1170 | list_del(&key->list); |
@@ -1181,7 +1207,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, | |||
1181 | memcpy(id->rand, rand, sizeof(id->rand)); | 1207 | memcpy(id->rand, rand, sizeof(id->rand)); |
1182 | 1208 | ||
1183 | if (new_key) | 1209 | if (new_key) |
1184 | mgmt_new_key(hdev->id, key, old_key_type); | 1210 | mgmt_new_link_key(hdev, key, old_key_type); |
1185 | 1211 | ||
1186 | return 0; | 1212 | return 0; |
1187 | } | 1213 | } |
@@ -1209,7 +1235,7 @@ static void hci_cmd_timer(unsigned long arg) | |||
1209 | 1235 | ||
1210 | BT_ERR("%s command tx timeout", hdev->name); | 1236 | BT_ERR("%s command tx timeout", hdev->name); |
1211 | atomic_set(&hdev->cmd_cnt, 1); | 1237 | atomic_set(&hdev->cmd_cnt, 1); |
1212 | tasklet_schedule(&hdev->cmd_task); | 1238 | queue_work(hdev->workqueue, &hdev->cmd_work); |
1213 | } | 1239 | } |
1214 | 1240 | ||
1215 | struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, | 1241 | struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, |
@@ -1279,16 +1305,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, | |||
1279 | struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, | 1305 | struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, |
1280 | bdaddr_t *bdaddr) | 1306 | bdaddr_t *bdaddr) |
1281 | { | 1307 | { |
1282 | struct list_head *p; | 1308 | struct bdaddr_list *b; |
1283 | |||
1284 | list_for_each(p, &hdev->blacklist) { | ||
1285 | struct bdaddr_list *b; | ||
1286 | |||
1287 | b = list_entry(p, struct bdaddr_list, list); | ||
1288 | 1309 | ||
1310 | list_for_each_entry(b, &hdev->blacklist, list) | ||
1289 | if (bacmp(bdaddr, &b->bdaddr) == 0) | 1311 | if (bacmp(bdaddr, &b->bdaddr) == 0) |
1290 | return b; | 1312 | return b; |
1291 | } | ||
1292 | 1313 | ||
1293 | return NULL; | 1314 | return NULL; |
1294 | } | 1315 | } |
@@ -1327,31 +1348,30 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
1327 | 1348 | ||
1328 | list_add(&entry->list, &hdev->blacklist); | 1349 | list_add(&entry->list, &hdev->blacklist); |
1329 | 1350 | ||
1330 | return mgmt_device_blocked(hdev->id, bdaddr); | 1351 | return mgmt_device_blocked(hdev, bdaddr); |
1331 | } | 1352 | } |
1332 | 1353 | ||
1333 | int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) | 1354 | int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) |
1334 | { | 1355 | { |
1335 | struct bdaddr_list *entry; | 1356 | struct bdaddr_list *entry; |
1336 | 1357 | ||
1337 | if (bacmp(bdaddr, BDADDR_ANY) == 0) { | 1358 | if (bacmp(bdaddr, BDADDR_ANY) == 0) |
1338 | return hci_blacklist_clear(hdev); | 1359 | return hci_blacklist_clear(hdev); |
1339 | } | ||
1340 | 1360 | ||
1341 | entry = hci_blacklist_lookup(hdev, bdaddr); | 1361 | entry = hci_blacklist_lookup(hdev, bdaddr); |
1342 | if (!entry) { | 1362 | if (!entry) |
1343 | return -ENOENT; | 1363 | return -ENOENT; |
1344 | } | ||
1345 | 1364 | ||
1346 | list_del(&entry->list); | 1365 | list_del(&entry->list); |
1347 | kfree(entry); | 1366 | kfree(entry); |
1348 | 1367 | ||
1349 | return mgmt_device_unblocked(hdev->id, bdaddr); | 1368 | return mgmt_device_unblocked(hdev, bdaddr); |
1350 | } | 1369 | } |
1351 | 1370 | ||
1352 | static void hci_clear_adv_cache(unsigned long arg) | 1371 | static void hci_clear_adv_cache(struct work_struct *work) |
1353 | { | 1372 | { |
1354 | struct hci_dev *hdev = (void *) arg; | 1373 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
1374 | adv_work.work); | ||
1355 | 1375 | ||
1356 | hci_dev_lock(hdev); | 1376 | hci_dev_lock(hdev); |
1357 | 1377 | ||
@@ -1425,7 +1445,7 @@ int hci_add_adv_entry(struct hci_dev *hdev, | |||
1425 | int hci_register_dev(struct hci_dev *hdev) | 1445 | int hci_register_dev(struct hci_dev *hdev) |
1426 | { | 1446 | { |
1427 | struct list_head *head = &hci_dev_list, *p; | 1447 | struct list_head *head = &hci_dev_list, *p; |
1428 | int i, id = 0; | 1448 | int i, id, error; |
1429 | 1449 | ||
1430 | BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, | 1450 | BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, |
1431 | hdev->bus, hdev->owner); | 1451 | hdev->bus, hdev->owner); |
@@ -1433,7 +1453,12 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1433 | if (!hdev->open || !hdev->close || !hdev->destruct) | 1453 | if (!hdev->open || !hdev->close || !hdev->destruct) |
1434 | return -EINVAL; | 1454 | return -EINVAL; |
1435 | 1455 | ||
1436 | write_lock_bh(&hci_dev_list_lock); | 1456 | /* Do not allow HCI_AMP devices to register at index 0, |
1457 | * so the index can be used as the AMP controller ID. | ||
1458 | */ | ||
1459 | id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; | ||
1460 | |||
1461 | write_lock(&hci_dev_list_lock); | ||
1437 | 1462 | ||
1438 | /* Find first available device id */ | 1463 | /* Find first available device id */ |
1439 | list_for_each(p, &hci_dev_list) { | 1464 | list_for_each(p, &hci_dev_list) { |
@@ -1444,12 +1469,13 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1444 | 1469 | ||
1445 | sprintf(hdev->name, "hci%d", id); | 1470 | sprintf(hdev->name, "hci%d", id); |
1446 | hdev->id = id; | 1471 | hdev->id = id; |
1447 | list_add(&hdev->list, head); | 1472 | list_add_tail(&hdev->list, head); |
1448 | 1473 | ||
1449 | atomic_set(&hdev->refcnt, 1); | 1474 | atomic_set(&hdev->refcnt, 1); |
1450 | spin_lock_init(&hdev->lock); | 1475 | mutex_init(&hdev->lock); |
1451 | 1476 | ||
1452 | hdev->flags = 0; | 1477 | hdev->flags = 0; |
1478 | hdev->dev_flags = 0; | ||
1453 | hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); | 1479 | hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); |
1454 | hdev->esco_type = (ESCO_HV1); | 1480 | hdev->esco_type = (ESCO_HV1); |
1455 | hdev->link_mode = (HCI_LM_ACCEPT); | 1481 | hdev->link_mode = (HCI_LM_ACCEPT); |
@@ -1459,9 +1485,10 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1459 | hdev->sniff_max_interval = 800; | 1485 | hdev->sniff_max_interval = 800; |
1460 | hdev->sniff_min_interval = 80; | 1486 | hdev->sniff_min_interval = 80; |
1461 | 1487 | ||
1462 | tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev); | 1488 | INIT_WORK(&hdev->rx_work, hci_rx_work); |
1463 | tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); | 1489 | INIT_WORK(&hdev->cmd_work, hci_cmd_work); |
1464 | tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); | 1490 | INIT_WORK(&hdev->tx_work, hci_tx_work); |
1491 | |||
1465 | 1492 | ||
1466 | skb_queue_head_init(&hdev->rx_q); | 1493 | skb_queue_head_init(&hdev->rx_q); |
1467 | skb_queue_head_init(&hdev->cmd_q); | 1494 | skb_queue_head_init(&hdev->cmd_q); |
@@ -1479,6 +1506,8 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1479 | 1506 | ||
1480 | hci_conn_hash_init(hdev); | 1507 | hci_conn_hash_init(hdev); |
1481 | 1508 | ||
1509 | INIT_LIST_HEAD(&hdev->mgmt_pending); | ||
1510 | |||
1482 | INIT_LIST_HEAD(&hdev->blacklist); | 1511 | INIT_LIST_HEAD(&hdev->blacklist); |
1483 | 1512 | ||
1484 | INIT_LIST_HEAD(&hdev->uuids); | 1513 | INIT_LIST_HEAD(&hdev->uuids); |
@@ -1488,24 +1517,29 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1488 | INIT_LIST_HEAD(&hdev->remote_oob_data); | 1517 | INIT_LIST_HEAD(&hdev->remote_oob_data); |
1489 | 1518 | ||
1490 | INIT_LIST_HEAD(&hdev->adv_entries); | 1519 | INIT_LIST_HEAD(&hdev->adv_entries); |
1491 | setup_timer(&hdev->adv_timer, hci_clear_adv_cache, | ||
1492 | (unsigned long) hdev); | ||
1493 | 1520 | ||
1521 | INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); | ||
1494 | INIT_WORK(&hdev->power_on, hci_power_on); | 1522 | INIT_WORK(&hdev->power_on, hci_power_on); |
1495 | INIT_WORK(&hdev->power_off, hci_power_off); | 1523 | INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); |
1496 | setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); | 1524 | |
1525 | INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); | ||
1497 | 1526 | ||
1498 | memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); | 1527 | memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); |
1499 | 1528 | ||
1500 | atomic_set(&hdev->promisc, 0); | 1529 | atomic_set(&hdev->promisc, 0); |
1501 | 1530 | ||
1502 | write_unlock_bh(&hci_dev_list_lock); | 1531 | write_unlock(&hci_dev_list_lock); |
1503 | 1532 | ||
1504 | hdev->workqueue = create_singlethread_workqueue(hdev->name); | 1533 | hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | |
1505 | if (!hdev->workqueue) | 1534 | WQ_MEM_RECLAIM, 1); |
1506 | goto nomem; | 1535 | if (!hdev->workqueue) { |
1536 | error = -ENOMEM; | ||
1537 | goto err; | ||
1538 | } | ||
1507 | 1539 | ||
1508 | hci_register_sysfs(hdev); | 1540 | error = hci_add_sysfs(hdev); |
1541 | if (error < 0) | ||
1542 | goto err_wqueue; | ||
1509 | 1543 | ||
1510 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, | 1544 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, |
1511 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); | 1545 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); |
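The registration path above switches from create_singlethread_workqueue() to alloc_workqueue() with WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM and max_active = 1, i.e. an ordered, high-priority queue that can still make forward progress under memory pressure. A minimal sketch, with "demo" standing in for hdev->name:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_setup(void)
{
	demo_wq = alloc_workqueue("demo",
				  WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!demo_wq)
		return -ENOMEM;

	return 0;
}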
@@ -1518,31 +1552,33 @@ int hci_register_dev(struct hci_dev *hdev) | |||
1518 | 1552 | ||
1519 | set_bit(HCI_AUTO_OFF, &hdev->flags); | 1553 | set_bit(HCI_AUTO_OFF, &hdev->flags); |
1520 | set_bit(HCI_SETUP, &hdev->flags); | 1554 | set_bit(HCI_SETUP, &hdev->flags); |
1521 | queue_work(hdev->workqueue, &hdev->power_on); | 1555 | schedule_work(&hdev->power_on); |
1522 | 1556 | ||
1523 | hci_notify(hdev, HCI_DEV_REG); | 1557 | hci_notify(hdev, HCI_DEV_REG); |
1524 | 1558 | ||
1525 | return id; | 1559 | return id; |
1526 | 1560 | ||
1527 | nomem: | 1561 | err_wqueue: |
1528 | write_lock_bh(&hci_dev_list_lock); | 1562 | destroy_workqueue(hdev->workqueue); |
1563 | err: | ||
1564 | write_lock(&hci_dev_list_lock); | ||
1529 | list_del(&hdev->list); | 1565 | list_del(&hdev->list); |
1530 | write_unlock_bh(&hci_dev_list_lock); | 1566 | write_unlock(&hci_dev_list_lock); |
1531 | 1567 | ||
1532 | return -ENOMEM; | 1568 | return error; |
1533 | } | 1569 | } |
1534 | EXPORT_SYMBOL(hci_register_dev); | 1570 | EXPORT_SYMBOL(hci_register_dev); |
1535 | 1571 | ||
1536 | /* Unregister HCI device */ | 1572 | /* Unregister HCI device */ |
1537 | int hci_unregister_dev(struct hci_dev *hdev) | 1573 | void hci_unregister_dev(struct hci_dev *hdev) |
1538 | { | 1574 | { |
1539 | int i; | 1575 | int i; |
1540 | 1576 | ||
1541 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | 1577 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
1542 | 1578 | ||
1543 | write_lock_bh(&hci_dev_list_lock); | 1579 | write_lock(&hci_dev_list_lock); |
1544 | list_del(&hdev->list); | 1580 | list_del(&hdev->list); |
1545 | write_unlock_bh(&hci_dev_list_lock); | 1581 | write_unlock(&hci_dev_list_lock); |
1546 | 1582 | ||
1547 | hci_dev_do_close(hdev); | 1583 | hci_dev_do_close(hdev); |
1548 | 1584 | ||
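As a side note on the registration flow above: failure handling now unwinds through labels, releasing resources in reverse order of acquisition (workqueue first, then the device-list entry). A minimal sketch of that pattern, with demo_* names that are illustrative rather than kernel API:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    /* Stand-in for the second setup step (sysfs registration above). */
    static int demo_add_sysfs(struct workqueue_struct *wq)
    {
        return 0;
    }

    static int demo_register(struct workqueue_struct **wq)
    {
        int error;

        *wq = alloc_workqueue("demo", WQ_HIGHPRI | WQ_UNBOUND |
                              WQ_MEM_RECLAIM, 1);
        if (!*wq)
            return -ENOMEM;

        error = demo_add_sysfs(*wq);
        if (error < 0)
            goto err_wqueue;

        return 0;

    err_wqueue:
        /* Later failures tear down only what earlier steps set up. */
        destroy_workqueue(*wq);
        return error;
    }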
@@ -1550,8 +1586,15 @@ int hci_unregister_dev(struct hci_dev *hdev) | |||
1550 | kfree_skb(hdev->reassembly[i]); | 1586 | kfree_skb(hdev->reassembly[i]); |
1551 | 1587 | ||
1552 | if (!test_bit(HCI_INIT, &hdev->flags) && | 1588 | if (!test_bit(HCI_INIT, &hdev->flags) && |
1553 | !test_bit(HCI_SETUP, &hdev->flags)) | 1589 | !test_bit(HCI_SETUP, &hdev->flags)) { |
1554 | mgmt_index_removed(hdev->id); | 1590 | hci_dev_lock(hdev); |
1591 | mgmt_index_removed(hdev); | ||
1592 | hci_dev_unlock(hdev); | ||
1593 | } | ||
1594 | |||
1595 | /* mgmt_index_removed should take care of emptying the | ||
1596 | * pending list */ | ||
1597 | BUG_ON(!list_empty(&hdev->mgmt_pending)); | ||
1555 | 1598 | ||
1556 | hci_notify(hdev, HCI_DEV_UNREG); | 1599 | hci_notify(hdev, HCI_DEV_UNREG); |
1557 | 1600 | ||
@@ -1560,24 +1603,21 @@ int hci_unregister_dev(struct hci_dev *hdev) | |||
1560 | rfkill_destroy(hdev->rfkill); | 1603 | rfkill_destroy(hdev->rfkill); |
1561 | } | 1604 | } |
1562 | 1605 | ||
1563 | hci_unregister_sysfs(hdev); | 1606 | hci_del_sysfs(hdev); |
1564 | 1607 | ||
1565 | hci_del_off_timer(hdev); | 1608 | cancel_delayed_work_sync(&hdev->adv_work); |
1566 | del_timer(&hdev->adv_timer); | ||
1567 | 1609 | ||
1568 | destroy_workqueue(hdev->workqueue); | 1610 | destroy_workqueue(hdev->workqueue); |
1569 | 1611 | ||
1570 | hci_dev_lock_bh(hdev); | 1612 | hci_dev_lock(hdev); |
1571 | hci_blacklist_clear(hdev); | 1613 | hci_blacklist_clear(hdev); |
1572 | hci_uuids_clear(hdev); | 1614 | hci_uuids_clear(hdev); |
1573 | hci_link_keys_clear(hdev); | 1615 | hci_link_keys_clear(hdev); |
1574 | hci_remote_oob_data_clear(hdev); | 1616 | hci_remote_oob_data_clear(hdev); |
1575 | hci_adv_entries_clear(hdev); | 1617 | hci_adv_entries_clear(hdev); |
1576 | hci_dev_unlock_bh(hdev); | 1618 | hci_dev_unlock(hdev); |
1577 | 1619 | ||
1578 | __hci_dev_put(hdev); | 1620 | __hci_dev_put(hdev); |
1579 | |||
1580 | return 0; | ||
1581 | } | 1621 | } |
1582 | EXPORT_SYMBOL(hci_unregister_dev); | 1622 | EXPORT_SYMBOL(hci_unregister_dev); |
1583 | 1623 | ||
@@ -1613,9 +1653,8 @@ int hci_recv_frame(struct sk_buff *skb) | |||
1613 | /* Time stamp */ | 1653 | /* Time stamp */ |
1614 | __net_timestamp(skb); | 1654 | __net_timestamp(skb); |
1615 | 1655 | ||
1616 | /* Queue frame for rx task */ | ||
1617 | skb_queue_tail(&hdev->rx_q, skb); | 1656 | skb_queue_tail(&hdev->rx_q, skb); |
1618 | tasklet_schedule(&hdev->rx_task); | 1657 | queue_work(hdev->workqueue, &hdev->rx_work); |
1619 | 1658 | ||
1620 | return 0; | 1659 | return 0; |
1621 | } | 1660 | } |
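The RX path above now hands incoming frames to a work item on the per-device workqueue instead of scheduling a tasklet, so processing happens in process context and may sleep. A rough, self-contained sketch of the tasklet-to-work_struct pattern; the demo_* names are made up for illustration:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/workqueue.h>

    struct demo_dev {
        struct sk_buff_head rx_q;
        struct work_struct rx_work;
        struct workqueue_struct *wq;
    };

    /* Runs in process context on the device workqueue; a tasklet would have
     * run in softirq context and could not sleep. */
    static void demo_rx_work(struct work_struct *work)
    {
        struct demo_dev *d = container_of(work, struct demo_dev, rx_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&d->rx_q)))
            kfree_skb(skb);             /* a real driver would dispatch here */
    }

    static int demo_setup(struct demo_dev *d)
    {
        skb_queue_head_init(&d->rx_q);
        INIT_WORK(&d->rx_work, demo_rx_work);
        d->wq = alloc_workqueue("demo_rx", WQ_HIGHPRI, 1);
        return d->wq ? 0 : -ENOMEM;
    }

    /* Producer side, mirroring hci_recv_frame(): queue the frame, kick work. */
    static void demo_rx_frame(struct demo_dev *d, struct sk_buff *skb)
    {
        skb_queue_tail(&d->rx_q, skb);
        queue_work(d->wq, &d->rx_work);
    }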
@@ -1787,59 +1826,13 @@ EXPORT_SYMBOL(hci_recv_stream_fragment); | |||
1787 | 1826 | ||
1788 | /* ---- Interface to upper protocols ---- */ | 1827 | /* ---- Interface to upper protocols ---- */ |
1789 | 1828 | ||
1790 | /* Register/Unregister protocols. | ||
1791 | * hci_task_lock is used to ensure that no tasks are running. */ | ||
1792 | int hci_register_proto(struct hci_proto *hp) | ||
1793 | { | ||
1794 | int err = 0; | ||
1795 | |||
1796 | BT_DBG("%p name %s id %d", hp, hp->name, hp->id); | ||
1797 | |||
1798 | if (hp->id >= HCI_MAX_PROTO) | ||
1799 | return -EINVAL; | ||
1800 | |||
1801 | write_lock_bh(&hci_task_lock); | ||
1802 | |||
1803 | if (!hci_proto[hp->id]) | ||
1804 | hci_proto[hp->id] = hp; | ||
1805 | else | ||
1806 | err = -EEXIST; | ||
1807 | |||
1808 | write_unlock_bh(&hci_task_lock); | ||
1809 | |||
1810 | return err; | ||
1811 | } | ||
1812 | EXPORT_SYMBOL(hci_register_proto); | ||
1813 | |||
1814 | int hci_unregister_proto(struct hci_proto *hp) | ||
1815 | { | ||
1816 | int err = 0; | ||
1817 | |||
1818 | BT_DBG("%p name %s id %d", hp, hp->name, hp->id); | ||
1819 | |||
1820 | if (hp->id >= HCI_MAX_PROTO) | ||
1821 | return -EINVAL; | ||
1822 | |||
1823 | write_lock_bh(&hci_task_lock); | ||
1824 | |||
1825 | if (hci_proto[hp->id]) | ||
1826 | hci_proto[hp->id] = NULL; | ||
1827 | else | ||
1828 | err = -ENOENT; | ||
1829 | |||
1830 | write_unlock_bh(&hci_task_lock); | ||
1831 | |||
1832 | return err; | ||
1833 | } | ||
1834 | EXPORT_SYMBOL(hci_unregister_proto); | ||
1835 | |||
1836 | int hci_register_cb(struct hci_cb *cb) | 1829 | int hci_register_cb(struct hci_cb *cb) |
1837 | { | 1830 | { |
1838 | BT_DBG("%p name %s", cb, cb->name); | 1831 | BT_DBG("%p name %s", cb, cb->name); |
1839 | 1832 | ||
1840 | write_lock_bh(&hci_cb_list_lock); | 1833 | write_lock(&hci_cb_list_lock); |
1841 | list_add(&cb->list, &hci_cb_list); | 1834 | list_add(&cb->list, &hci_cb_list); |
1842 | write_unlock_bh(&hci_cb_list_lock); | 1835 | write_unlock(&hci_cb_list_lock); |
1843 | 1836 | ||
1844 | return 0; | 1837 | return 0; |
1845 | } | 1838 | } |
@@ -1849,9 +1842,9 @@ int hci_unregister_cb(struct hci_cb *cb) | |||
1849 | { | 1842 | { |
1850 | BT_DBG("%p name %s", cb, cb->name); | 1843 | BT_DBG("%p name %s", cb, cb->name); |
1851 | 1844 | ||
1852 | write_lock_bh(&hci_cb_list_lock); | 1845 | write_lock(&hci_cb_list_lock); |
1853 | list_del(&cb->list); | 1846 | list_del(&cb->list); |
1854 | write_unlock_bh(&hci_cb_list_lock); | 1847 | write_unlock(&hci_cb_list_lock); |
1855 | 1848 | ||
1856 | return 0; | 1849 | return 0; |
1857 | } | 1850 | } |
@@ -1912,7 +1905,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) | |||
1912 | hdev->init_last_cmd = opcode; | 1905 | hdev->init_last_cmd = opcode; |
1913 | 1906 | ||
1914 | skb_queue_tail(&hdev->cmd_q, skb); | 1907 | skb_queue_tail(&hdev->cmd_q, skb); |
1915 | tasklet_schedule(&hdev->cmd_task); | 1908 | queue_work(hdev->workqueue, &hdev->cmd_work); |
1916 | 1909 | ||
1917 | return 0; | 1910 | return 0; |
1918 | } | 1911 | } |
@@ -1948,23 +1941,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) | |||
1948 | hdr->dlen = cpu_to_le16(len); | 1941 | hdr->dlen = cpu_to_le16(len); |
1949 | } | 1942 | } |
1950 | 1943 | ||
1951 | void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) | 1944 | static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, |
1945 | struct sk_buff *skb, __u16 flags) | ||
1952 | { | 1946 | { |
1953 | struct hci_dev *hdev = conn->hdev; | 1947 | struct hci_dev *hdev = conn->hdev; |
1954 | struct sk_buff *list; | 1948 | struct sk_buff *list; |
1955 | 1949 | ||
1956 | BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); | ||
1957 | |||
1958 | skb->dev = (void *) hdev; | ||
1959 | bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; | ||
1960 | hci_add_acl_hdr(skb, conn->handle, flags); | ||
1961 | |||
1962 | list = skb_shinfo(skb)->frag_list; | 1950 | list = skb_shinfo(skb)->frag_list; |
1963 | if (!list) { | 1951 | if (!list) { |
1964 | /* Non fragmented */ | 1952 | /* Non fragmented */ |
1965 | BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); | 1953 | BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); |
1966 | 1954 | ||
1967 | skb_queue_tail(&conn->data_q, skb); | 1955 | skb_queue_tail(queue, skb); |
1968 | } else { | 1956 | } else { |
1969 | /* Fragmented */ | 1957 | /* Fragmented */ |
1970 | BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); | 1958 | BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); |
@@ -1972,9 +1960,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) | |||
1972 | skb_shinfo(skb)->frag_list = NULL; | 1960 | skb_shinfo(skb)->frag_list = NULL; |
1973 | 1961 | ||
1974 | /* Queue all fragments atomically */ | 1962 | /* Queue all fragments atomically */ |
1975 | spin_lock_bh(&conn->data_q.lock); | 1963 | spin_lock(&queue->lock); |
1976 | 1964 | ||
1977 | __skb_queue_tail(&conn->data_q, skb); | 1965 | __skb_queue_tail(queue, skb); |
1978 | 1966 | ||
1979 | flags &= ~ACL_START; | 1967 | flags &= ~ACL_START; |
1980 | flags |= ACL_CONT; | 1968 | flags |= ACL_CONT; |
@@ -1987,13 +1975,27 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) | |||
1987 | 1975 | ||
1988 | BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); | 1976 | BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); |
1989 | 1977 | ||
1990 | __skb_queue_tail(&conn->data_q, skb); | 1978 | __skb_queue_tail(queue, skb); |
1991 | } while (list); | 1979 | } while (list); |
1992 | 1980 | ||
1993 | spin_unlock_bh(&conn->data_q.lock); | 1981 | spin_unlock(&queue->lock); |
1994 | } | 1982 | } |
1983 | } | ||
1984 | |||
1985 | void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) | ||
1986 | { | ||
1987 | struct hci_conn *conn = chan->conn; | ||
1988 | struct hci_dev *hdev = conn->hdev; | ||
1989 | |||
1990 | BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); | ||
1995 | 1991 | ||
1996 | tasklet_schedule(&hdev->tx_task); | 1992 | skb->dev = (void *) hdev; |
1993 | bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; | ||
1994 | hci_add_acl_hdr(skb, conn->handle, flags); | ||
1995 | |||
1996 | hci_queue_acl(conn, &chan->data_q, skb, flags); | ||
1997 | |||
1998 | queue_work(hdev->workqueue, &hdev->tx_work); | ||
1997 | } | 1999 | } |
1998 | EXPORT_SYMBOL(hci_send_acl); | 2000 | EXPORT_SYMBOL(hci_send_acl); |
1999 | 2001 | ||
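hci_send_acl() above now takes an hci_chan and queues onto the channel's own data_q, with all fragments of one frame appended under the queue lock so the TX scheduler never sees a partially queued frame. A hedged sketch of that fragment-queueing idea (demo_queue_fragments is hypothetical; per-fragment header and flag fixups are omitted):

    #include <linux/skbuff.h>

    static void demo_queue_fragments(struct sk_buff_head *queue,
                                     struct sk_buff *skb)
    {
        struct sk_buff *frag = skb_shinfo(skb)->frag_list;

        if (!frag) {
            /* Non-fragmented: a single, already-atomic append. */
            skb_queue_tail(queue, skb);
            return;
        }

        skb_shinfo(skb)->frag_list = NULL;

        /* Head plus all fragments go in under one lock acquisition. */
        spin_lock(&queue->lock);
        __skb_queue_tail(queue, skb);
        do {
            struct sk_buff *next = frag->next;

            frag->next = NULL;
            __skb_queue_tail(queue, frag);
            frag = next;
        } while (frag);
        spin_unlock(&queue->lock);
    }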
@@ -2016,7 +2018,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) | |||
2016 | bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; | 2018 | bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; |
2017 | 2019 | ||
2018 | skb_queue_tail(&conn->data_q, skb); | 2020 | skb_queue_tail(&conn->data_q, skb); |
2019 | tasklet_schedule(&hdev->tx_task); | 2021 | queue_work(hdev->workqueue, &hdev->tx_work); |
2020 | } | 2022 | } |
2021 | EXPORT_SYMBOL(hci_send_sco); | 2023 | EXPORT_SYMBOL(hci_send_sco); |
2022 | 2024 | ||
@@ -2026,16 +2028,15 @@ EXPORT_SYMBOL(hci_send_sco); | |||
2026 | static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) | 2028 | static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) |
2027 | { | 2029 | { |
2028 | struct hci_conn_hash *h = &hdev->conn_hash; | 2030 | struct hci_conn_hash *h = &hdev->conn_hash; |
2029 | struct hci_conn *conn = NULL; | 2031 | struct hci_conn *conn = NULL, *c; |
2030 | int num = 0, min = ~0; | 2032 | int num = 0, min = ~0; |
2031 | struct list_head *p; | ||
2032 | 2033 | ||
2033 | /* We don't have to lock device here. Connections are always | 2034 | /* We don't have to lock device here. Connections are always |
2034 | * added and removed with TX task disabled. */ | 2035 | * added and removed with TX task disabled. */ |
2035 | list_for_each(p, &h->list) { | ||
2036 | struct hci_conn *c; | ||
2037 | c = list_entry(p, struct hci_conn, list); | ||
2038 | 2036 | ||
2037 | rcu_read_lock(); | ||
2038 | |||
2039 | list_for_each_entry_rcu(c, &h->list, list) { | ||
2039 | if (c->type != type || skb_queue_empty(&c->data_q)) | 2040 | if (c->type != type || skb_queue_empty(&c->data_q)) |
2040 | continue; | 2041 | continue; |
2041 | 2042 | ||
@@ -2053,6 +2054,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int | |||
2053 | break; | 2054 | break; |
2054 | } | 2055 | } |
2055 | 2056 | ||
2057 | rcu_read_unlock(); | ||
2058 | |||
2056 | if (conn) { | 2059 | if (conn) { |
2057 | int cnt, q; | 2060 | int cnt, q; |
2058 | 2061 | ||
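hci_low_sent() above now iterates the connection hash under RCU rather than relying on the old "TX tasklet disabled" discipline. The generic read-side pattern, sketched with illustrative types (a writer would pair this with list_add_rcu()/list_del_rcu() and wait for a grace period before freeing entries):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct demo_conn {
        struct list_head list;
        int sent;                       /* packets in flight */
    };

    /* Lockless read-side traversal: safe against concurrent add/remove as
     * long as writers use the _rcu list helpers. */
    static int demo_count_busy(struct list_head *head)
    {
        struct demo_conn *c;
        int busy = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(c, head, list)
            if (c->sent)
                busy++;
        rcu_read_unlock();

        return busy;
    }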
@@ -2084,27 +2087,159 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int | |||
2084 | static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) | 2087 | static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) |
2085 | { | 2088 | { |
2086 | struct hci_conn_hash *h = &hdev->conn_hash; | 2089 | struct hci_conn_hash *h = &hdev->conn_hash; |
2087 | struct list_head *p; | 2090 | struct hci_conn *c; |
2088 | struct hci_conn *c; | ||
2089 | 2091 | ||
2090 | BT_ERR("%s link tx timeout", hdev->name); | 2092 | BT_ERR("%s link tx timeout", hdev->name); |
2091 | 2093 | ||
2094 | rcu_read_lock(); | ||
2095 | |||
2092 | /* Kill stalled connections */ | 2096 | /* Kill stalled connections */ |
2093 | list_for_each(p, &h->list) { | 2097 | list_for_each_entry_rcu(c, &h->list, list) { |
2094 | c = list_entry(p, struct hci_conn, list); | ||
2095 | if (c->type == type && c->sent) { | 2098 | if (c->type == type && c->sent) { |
2096 | BT_ERR("%s killing stalled connection %s", | 2099 | BT_ERR("%s killing stalled connection %s", |
2097 | hdev->name, batostr(&c->dst)); | 2100 | hdev->name, batostr(&c->dst)); |
2098 | hci_acl_disconn(c, 0x13); | 2101 | hci_acl_disconn(c, 0x13); |
2099 | } | 2102 | } |
2100 | } | 2103 | } |
2104 | |||
2105 | rcu_read_unlock(); | ||
2101 | } | 2106 | } |
2102 | 2107 | ||
2103 | static inline void hci_sched_acl(struct hci_dev *hdev) | 2108 | static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, |
2109 | int *quote) | ||
2104 | { | 2110 | { |
2111 | struct hci_conn_hash *h = &hdev->conn_hash; | ||
2112 | struct hci_chan *chan = NULL; | ||
2113 | int num = 0, min = ~0, cur_prio = 0; | ||
2105 | struct hci_conn *conn; | 2114 | struct hci_conn *conn; |
2115 | int cnt, q, conn_num = 0; | ||
2116 | |||
2117 | BT_DBG("%s", hdev->name); | ||
2118 | |||
2119 | rcu_read_lock(); | ||
2120 | |||
2121 | list_for_each_entry_rcu(conn, &h->list, list) { | ||
2122 | struct hci_chan *tmp; | ||
2123 | |||
2124 | if (conn->type != type) | ||
2125 | continue; | ||
2126 | |||
2127 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) | ||
2128 | continue; | ||
2129 | |||
2130 | conn_num++; | ||
2131 | |||
2132 | list_for_each_entry_rcu(tmp, &conn->chan_list, list) { | ||
2133 | struct sk_buff *skb; | ||
2134 | |||
2135 | if (skb_queue_empty(&tmp->data_q)) | ||
2136 | continue; | ||
2137 | |||
2138 | skb = skb_peek(&tmp->data_q); | ||
2139 | if (skb->priority < cur_prio) | ||
2140 | continue; | ||
2141 | |||
2142 | if (skb->priority > cur_prio) { | ||
2143 | num = 0; | ||
2144 | min = ~0; | ||
2145 | cur_prio = skb->priority; | ||
2146 | } | ||
2147 | |||
2148 | num++; | ||
2149 | |||
2150 | if (conn->sent < min) { | ||
2151 | min = conn->sent; | ||
2152 | chan = tmp; | ||
2153 | } | ||
2154 | } | ||
2155 | |||
2156 | if (hci_conn_num(hdev, type) == conn_num) | ||
2157 | break; | ||
2158 | } | ||
2159 | |||
2160 | rcu_read_unlock(); | ||
2161 | |||
2162 | if (!chan) | ||
2163 | return NULL; | ||
2164 | |||
2165 | switch (chan->conn->type) { | ||
2166 | case ACL_LINK: | ||
2167 | cnt = hdev->acl_cnt; | ||
2168 | break; | ||
2169 | case SCO_LINK: | ||
2170 | case ESCO_LINK: | ||
2171 | cnt = hdev->sco_cnt; | ||
2172 | break; | ||
2173 | case LE_LINK: | ||
2174 | cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; | ||
2175 | break; | ||
2176 | default: | ||
2177 | cnt = 0; | ||
2178 | BT_ERR("Unknown link type"); | ||
2179 | } | ||
2180 | |||
2181 | q = cnt / num; | ||
2182 | *quote = q ? q : 1; | ||
2183 | BT_DBG("chan %p quote %d", chan, *quote); | ||
2184 | return chan; | ||
2185 | } | ||
2186 | |||
2187 | static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) | ||
2188 | { | ||
2189 | struct hci_conn_hash *h = &hdev->conn_hash; | ||
2190 | struct hci_conn *conn; | ||
2191 | int num = 0; | ||
2192 | |||
2193 | BT_DBG("%s", hdev->name); | ||
2194 | |||
2195 | rcu_read_lock(); | ||
2196 | |||
2197 | list_for_each_entry_rcu(conn, &h->list, list) { | ||
2198 | struct hci_chan *chan; | ||
2199 | |||
2200 | if (conn->type != type) | ||
2201 | continue; | ||
2202 | |||
2203 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) | ||
2204 | continue; | ||
2205 | |||
2206 | num++; | ||
2207 | |||
2208 | list_for_each_entry_rcu(chan, &conn->chan_list, list) { | ||
2209 | struct sk_buff *skb; | ||
2210 | |||
2211 | if (chan->sent) { | ||
2212 | chan->sent = 0; | ||
2213 | continue; | ||
2214 | } | ||
2215 | |||
2216 | if (skb_queue_empty(&chan->data_q)) | ||
2217 | continue; | ||
2218 | |||
2219 | skb = skb_peek(&chan->data_q); | ||
2220 | if (skb->priority >= HCI_PRIO_MAX - 1) | ||
2221 | continue; | ||
2222 | |||
2223 | skb->priority = HCI_PRIO_MAX - 1; | ||
2224 | |||
2225 | BT_DBG("chan %p skb %p promoted to %d", chan, skb, | ||
2226 | skb->priority); | ||
2227 | } | ||
2228 | |||
2229 | if (hci_conn_num(hdev, type) == num) | ||
2230 | break; | ||
2231 | } | ||
2232 | |||
2233 | rcu_read_unlock(); | ||
2234 | |||
2235 | } | ||
2236 | |||
2237 | static inline void hci_sched_acl(struct hci_dev *hdev) | ||
2238 | { | ||
2239 | struct hci_chan *chan; | ||
2106 | struct sk_buff *skb; | 2240 | struct sk_buff *skb; |
2107 | int quote; | 2241 | int quote; |
2242 | unsigned int cnt; | ||
2108 | 2243 | ||
2109 | BT_DBG("%s", hdev->name); | 2244 | BT_DBG("%s", hdev->name); |
2110 | 2245 | ||
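hci_chan_sent() above selects among channels whose head skb carries the highest priority and, within that set, favours the connection with the fewest packets in flight; the controller's free buffer count is then split evenly across the 'num' competitors. The quota arithmetic in isolation, as a hedged sketch:

    /* Each selected channel may send at most free_buffers / num_channels
     * frames per scheduling round, but never less than one. */
    static inline int demo_quote(int free_buffers, int num_channels)
    {
        int q = num_channels ? free_buffers / num_channels : 0;

        return q ? q : 1;
    }

    /* Example: 8 free ACL buffers, 3 channels at equal priority -> quota 2. */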
@@ -2118,19 +2253,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev) | |||
2118 | hci_link_tx_to(hdev, ACL_LINK); | 2253 | hci_link_tx_to(hdev, ACL_LINK); |
2119 | } | 2254 | } |
2120 | 2255 | ||
2121 | while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { | 2256 | cnt = hdev->acl_cnt; |
2122 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { | ||
2123 | BT_DBG("skb %p len %d", skb, skb->len); | ||
2124 | 2257 | ||
2125 | hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); | 2258 | while (hdev->acl_cnt && |
2259 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { |
2260 | u32 priority = (skb_peek(&chan->data_q))->priority; | ||
2261 | while (quote-- && (skb = skb_peek(&chan->data_q))) { | ||
2262 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, | ||
2263 | skb->len, skb->priority); | ||
2264 | |||
2265 | /* Stop if priority has changed */ | ||
2266 | if (skb->priority < priority) | ||
2267 | break; | ||
2268 | |||
2269 | skb = skb_dequeue(&chan->data_q); | ||
2270 | |||
2271 | hci_conn_enter_active_mode(chan->conn, | ||
2272 | bt_cb(skb)->force_active); | ||
2126 | 2273 | ||
2127 | hci_send_frame(skb); | 2274 | hci_send_frame(skb); |
2128 | hdev->acl_last_tx = jiffies; | 2275 | hdev->acl_last_tx = jiffies; |
2129 | 2276 | ||
2130 | hdev->acl_cnt--; | 2277 | hdev->acl_cnt--; |
2131 | conn->sent++; | 2278 | chan->sent++; |
2279 | chan->conn->sent++; | ||
2132 | } | 2280 | } |
2133 | } | 2281 | } |
2282 | |||
2283 | if (cnt != hdev->acl_cnt) | ||
2284 | hci_prio_recalculate(hdev, ACL_LINK); | ||
2134 | } | 2285 | } |
2135 | 2286 | ||
2136 | /* Schedule SCO */ | 2287 | /* Schedule SCO */ |
@@ -2182,9 +2333,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev) | |||
2182 | 2333 | ||
2183 | static inline void hci_sched_le(struct hci_dev *hdev) | 2334 | static inline void hci_sched_le(struct hci_dev *hdev) |
2184 | { | 2335 | { |
2185 | struct hci_conn *conn; | 2336 | struct hci_chan *chan; |
2186 | struct sk_buff *skb; | 2337 | struct sk_buff *skb; |
2187 | int quote, cnt; | 2338 | int quote, cnt, tmp; |
2188 | 2339 | ||
2189 | BT_DBG("%s", hdev->name); | 2340 | BT_DBG("%s", hdev->name); |
2190 | 2341 | ||
@@ -2200,30 +2351,42 @@ static inline void hci_sched_le(struct hci_dev *hdev) | |||
2200 | } | 2351 | } |
2201 | 2352 | ||
2202 | cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; | 2353 | cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; |
2203 | while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) { | 2354 | tmp = cnt; |
2204 | while (quote-- && (skb = skb_dequeue(&conn->data_q))) { | 2355 | while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { |
2205 | BT_DBG("skb %p len %d", skb, skb->len); | 2356 | u32 priority = (skb_peek(&chan->data_q))->priority; |
2357 | while (quote-- && (skb = skb_peek(&chan->data_q))) { | ||
2358 | BT_DBG("chan %p skb %p len %d priority %u", chan, skb, | ||
2359 | skb->len, skb->priority); | ||
2360 | |||
2361 | /* Stop if priority has changed */ | ||
2362 | if (skb->priority < priority) | ||
2363 | break; | ||
2364 | |||
2365 | skb = skb_dequeue(&chan->data_q); | ||
2206 | 2366 | ||
2207 | hci_send_frame(skb); | 2367 | hci_send_frame(skb); |
2208 | hdev->le_last_tx = jiffies; | 2368 | hdev->le_last_tx = jiffies; |
2209 | 2369 | ||
2210 | cnt--; | 2370 | cnt--; |
2211 | conn->sent++; | 2371 | chan->sent++; |
2372 | chan->conn->sent++; | ||
2212 | } | 2373 | } |
2213 | } | 2374 | } |
2375 | |||
2214 | if (hdev->le_pkts) | 2376 | if (hdev->le_pkts) |
2215 | hdev->le_cnt = cnt; | 2377 | hdev->le_cnt = cnt; |
2216 | else | 2378 | else |
2217 | hdev->acl_cnt = cnt; | 2379 | hdev->acl_cnt = cnt; |
2380 | |||
2381 | if (cnt != tmp) | ||
2382 | hci_prio_recalculate(hdev, LE_LINK); | ||
2218 | } | 2383 | } |
2219 | 2384 | ||
2220 | static void hci_tx_task(unsigned long arg) | 2385 | static void hci_tx_work(struct work_struct *work) |
2221 | { | 2386 | { |
2222 | struct hci_dev *hdev = (struct hci_dev *) arg; | 2387 | struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); |
2223 | struct sk_buff *skb; | 2388 | struct sk_buff *skb; |
2224 | 2389 | ||
2225 | read_lock(&hci_task_lock); | ||
2226 | |||
2227 | BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, | 2390 | BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, |
2228 | hdev->sco_cnt, hdev->le_cnt); | 2391 | hdev->sco_cnt, hdev->le_cnt); |
2229 | 2392 | ||
@@ -2240,8 +2403,6 @@ static void hci_tx_task(unsigned long arg) | |||
2240 | /* Send next queued raw (unknown type) packet */ | 2403 | /* Send next queued raw (unknown type) packet */ |
2241 | while ((skb = skb_dequeue(&hdev->raw_q))) | 2404 | while ((skb = skb_dequeue(&hdev->raw_q))) |
2242 | hci_send_frame(skb); | 2405 | hci_send_frame(skb); |
2243 | |||
2244 | read_unlock(&hci_task_lock); | ||
2245 | } | 2406 | } |
2246 | 2407 | ||
2247 | /* ----- HCI RX task (incoming data processing) ----- */ | 2408 | /* ----- HCI RX task (incoming data processing) ----- */ |
@@ -2268,16 +2429,11 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2268 | hci_dev_unlock(hdev); | 2429 | hci_dev_unlock(hdev); |
2269 | 2430 | ||
2270 | if (conn) { | 2431 | if (conn) { |
2271 | register struct hci_proto *hp; | 2432 | hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); |
2272 | |||
2273 | hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); | ||
2274 | 2433 | ||
2275 | /* Send to upper protocol */ | 2434 | /* Send to upper protocol */ |
2276 | hp = hci_proto[HCI_PROTO_L2CAP]; | 2435 | l2cap_recv_acldata(conn, skb, flags); |
2277 | if (hp && hp->recv_acldata) { | 2436 | return; |
2278 | hp->recv_acldata(conn, skb, flags); | ||
2279 | return; | ||
2280 | } | ||
2281 | } else { | 2437 | } else { |
2282 | BT_ERR("%s ACL packet for unknown connection handle %d", | 2438 | BT_ERR("%s ACL packet for unknown connection handle %d", |
2283 | hdev->name, handle); | 2439 | hdev->name, handle); |
@@ -2306,14 +2462,9 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2306 | hci_dev_unlock(hdev); | 2462 | hci_dev_unlock(hdev); |
2307 | 2463 | ||
2308 | if (conn) { | 2464 | if (conn) { |
2309 | register struct hci_proto *hp; | ||
2310 | |||
2311 | /* Send to upper protocol */ | 2465 | /* Send to upper protocol */ |
2312 | hp = hci_proto[HCI_PROTO_SCO]; | 2466 | sco_recv_scodata(conn, skb); |
2313 | if (hp && hp->recv_scodata) { | 2467 | return; |
2314 | hp->recv_scodata(conn, skb); | ||
2315 | return; | ||
2316 | } | ||
2317 | } else { | 2468 | } else { |
2318 | BT_ERR("%s SCO packet for unknown connection handle %d", | 2469 | BT_ERR("%s SCO packet for unknown connection handle %d", |
2319 | hdev->name, handle); | 2470 | hdev->name, handle); |
@@ -2322,15 +2473,13 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
2322 | kfree_skb(skb); | 2473 | kfree_skb(skb); |
2323 | } | 2474 | } |
2324 | 2475 | ||
2325 | static void hci_rx_task(unsigned long arg) | 2476 | static void hci_rx_work(struct work_struct *work) |
2326 | { | 2477 | { |
2327 | struct hci_dev *hdev = (struct hci_dev *) arg; | 2478 | struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); |
2328 | struct sk_buff *skb; | 2479 | struct sk_buff *skb; |
2329 | 2480 | ||
2330 | BT_DBG("%s", hdev->name); | 2481 | BT_DBG("%s", hdev->name); |
2331 | 2482 | ||
2332 | read_lock(&hci_task_lock); | ||
2333 | |||
2334 | while ((skb = skb_dequeue(&hdev->rx_q))) { | 2483 | while ((skb = skb_dequeue(&hdev->rx_q))) { |
2335 | if (atomic_read(&hdev->promisc)) { | 2484 | if (atomic_read(&hdev->promisc)) { |
2336 | /* Send copy to the sockets */ | 2485 | /* Send copy to the sockets */ |
@@ -2355,6 +2504,7 @@ static void hci_rx_task(unsigned long arg) | |||
2355 | /* Process frame */ | 2504 | /* Process frame */ |
2356 | switch (bt_cb(skb)->pkt_type) { | 2505 | switch (bt_cb(skb)->pkt_type) { |
2357 | case HCI_EVENT_PKT: | 2506 | case HCI_EVENT_PKT: |
2507 | BT_DBG("%s Event packet", hdev->name); | ||
2358 | hci_event_packet(hdev, skb); | 2508 | hci_event_packet(hdev, skb); |
2359 | break; | 2509 | break; |
2360 | 2510 | ||
@@ -2373,13 +2523,11 @@ static void hci_rx_task(unsigned long arg) | |||
2373 | break; | 2523 | break; |
2374 | } | 2524 | } |
2375 | } | 2525 | } |
2376 | |||
2377 | read_unlock(&hci_task_lock); | ||
2378 | } | 2526 | } |
2379 | 2527 | ||
2380 | static void hci_cmd_task(unsigned long arg) | 2528 | static void hci_cmd_work(struct work_struct *work) |
2381 | { | 2529 | { |
2382 | struct hci_dev *hdev = (struct hci_dev *) arg; | 2530 | struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); |
2383 | struct sk_buff *skb; | 2531 | struct sk_buff *skb; |
2384 | 2532 | ||
2385 | BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); | 2533 | BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); |
@@ -2403,7 +2551,38 @@ static void hci_cmd_task(unsigned long arg) | |||
2403 | jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); | 2551 | jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); |
2404 | } else { | 2552 | } else { |
2405 | skb_queue_head(&hdev->cmd_q, skb); | 2553 | skb_queue_head(&hdev->cmd_q, skb); |
2406 | tasklet_schedule(&hdev->cmd_task); | 2554 | queue_work(hdev->workqueue, &hdev->cmd_work); |
2407 | } | 2555 | } |
2408 | } | 2556 | } |
2409 | } | 2557 | } |
2558 | |||
2559 | int hci_do_inquiry(struct hci_dev *hdev, u8 length) | ||
2560 | { | ||
2561 | /* General inquiry access code (GIAC) */ | ||
2562 | u8 lap[3] = { 0x33, 0x8b, 0x9e }; | ||
2563 | struct hci_cp_inquiry cp; | ||
2564 | |||
2565 | BT_DBG("%s", hdev->name); | ||
2566 | |||
2567 | if (test_bit(HCI_INQUIRY, &hdev->flags)) | ||
2568 | return -EINPROGRESS; | ||
2569 | |||
2570 | memset(&cp, 0, sizeof(cp)); | ||
2571 | memcpy(&cp.lap, lap, sizeof(cp.lap)); | ||
2572 | cp.length = length; | ||
2573 | |||
2574 | return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); | ||
2575 | } | ||
2576 | |||
2577 | int hci_cancel_inquiry(struct hci_dev *hdev) | ||
2578 | { | ||
2579 | BT_DBG("%s", hdev->name); | ||
2580 | |||
2581 | if (!test_bit(HCI_INQUIRY, &hdev->flags)) | ||
2582 | return -EPERM; | ||
2583 | |||
2584 | return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); | ||
2585 | } | ||
2586 | |||
2587 | module_param(enable_hs, bool, 0644); | ||
2588 | MODULE_PARM_DESC(enable_hs, "Enable High Speed"); | ||
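The new hci_do_inquiry() and hci_cancel_inquiry() helpers wrap the General Inquiry Access Code (0x9e8b33, stored little-endian as {0x33, 0x8b, 0x9e}); the length parameter is in units of 1.28 s. A purely hypothetical caller, sketching the return-value conventions:

    #include <net/bluetooth/bluetooth.h>
    #include <net/bluetooth/hci_core.h>

    static int demo_start_discovery(struct hci_dev *hdev)
    {
        int err;

        err = hci_do_inquiry(hdev, 0x08);       /* roughly 10.24 s of inquiry */
        if (err == -EINPROGRESS)
            return 0;                           /* an inquiry is already running */

        return err;
    }

    static void demo_stop_discovery(struct hci_dev *hdev)
    {
        /* -EPERM from hci_cancel_inquiry() just means nothing was active. */
        hci_cancel_inquiry(hdev);
    }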
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 643a41b76e2e..4221bd256bdd 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #include <net/bluetooth/bluetooth.h> | 45 | #include <net/bluetooth/bluetooth.h> |
46 | #include <net/bluetooth/hci_core.h> | 46 | #include <net/bluetooth/hci_core.h> |
47 | 47 | ||
48 | static int enable_le; | 48 | static bool enable_le; |
49 | 49 | ||
50 | /* Handle HCI Event packets */ | 50 | /* Handle HCI Event packets */ |
51 | 51 | ||
@@ -55,12 +55,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) | |||
55 | 55 | ||
56 | BT_DBG("%s status 0x%x", hdev->name, status); | 56 | BT_DBG("%s status 0x%x", hdev->name, status); |
57 | 57 | ||
58 | if (status) | 58 | if (status) { |
59 | hci_dev_lock(hdev); | ||
60 | mgmt_stop_discovery_failed(hdev, status); | ||
61 | hci_dev_unlock(hdev); | ||
59 | return; | 62 | return; |
63 | } | ||
60 | 64 | ||
61 | if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && | 65 | clear_bit(HCI_INQUIRY, &hdev->flags); |
62 | test_bit(HCI_MGMT, &hdev->flags)) | 66 | |
63 | mgmt_discovering(hdev->id, 0); | 67 | hci_dev_lock(hdev); |
68 | mgmt_discovering(hdev, 0); | ||
69 | hci_dev_unlock(hdev); | ||
64 | 70 | ||
65 | hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); | 71 | hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); |
66 | 72 | ||
@@ -76,10 +82,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) | |||
76 | if (status) | 82 | if (status) |
77 | return; | 83 | return; |
78 | 84 | ||
79 | if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && | ||
80 | test_bit(HCI_MGMT, &hdev->flags)) | ||
81 | mgmt_discovering(hdev->id, 0); | ||
82 | |||
83 | hci_conn_check_pending(hdev); | 85 | hci_conn_check_pending(hdev); |
84 | } | 86 | } |
85 | 87 | ||
@@ -192,6 +194,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) | |||
192 | clear_bit(HCI_RESET, &hdev->flags); | 194 | clear_bit(HCI_RESET, &hdev->flags); |
193 | 195 | ||
194 | hci_req_complete(hdev, HCI_OP_RESET, status); | 196 | hci_req_complete(hdev, HCI_OP_RESET, status); |
197 | |||
198 | hdev->dev_flags = 0; | ||
195 | } | 199 | } |
196 | 200 | ||
197 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) | 201 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -205,13 +209,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) | |||
205 | if (!sent) | 209 | if (!sent) |
206 | return; | 210 | return; |
207 | 211 | ||
212 | hci_dev_lock(hdev); | ||
213 | |||
208 | if (test_bit(HCI_MGMT, &hdev->flags)) | 214 | if (test_bit(HCI_MGMT, &hdev->flags)) |
209 | mgmt_set_local_name_complete(hdev->id, sent, status); | 215 | mgmt_set_local_name_complete(hdev, sent, status); |
210 | 216 | ||
211 | if (status) | 217 | if (status == 0) |
212 | return; | 218 | memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); |
213 | 219 | ||
214 | memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); | 220 | hci_dev_unlock(hdev); |
215 | } | 221 | } |
216 | 222 | ||
217 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) | 223 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -274,7 +280,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) | |||
274 | 280 | ||
275 | static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) | 281 | static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) |
276 | { | 282 | { |
277 | __u8 status = *((__u8 *) skb->data); | 283 | __u8 param, status = *((__u8 *) skb->data); |
284 | int old_pscan, old_iscan; | ||
278 | void *sent; | 285 | void *sent; |
279 | 286 | ||
280 | BT_DBG("%s status 0x%x", hdev->name, status); | 287 | BT_DBG("%s status 0x%x", hdev->name, status); |
@@ -283,28 +290,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) | |||
283 | if (!sent) | 290 | if (!sent) |
284 | return; | 291 | return; |
285 | 292 | ||
286 | if (!status) { | 293 | param = *((__u8 *) sent); |
287 | __u8 param = *((__u8 *) sent); | ||
288 | int old_pscan, old_iscan; | ||
289 | |||
290 | old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); | ||
291 | old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); | ||
292 | 294 | ||
293 | if (param & SCAN_INQUIRY) { | 295 | hci_dev_lock(hdev); |
294 | set_bit(HCI_ISCAN, &hdev->flags); | ||
295 | if (!old_iscan) | ||
296 | mgmt_discoverable(hdev->id, 1); | ||
297 | } else if (old_iscan) | ||
298 | mgmt_discoverable(hdev->id, 0); | ||
299 | 296 | ||
300 | if (param & SCAN_PAGE) { | 297 | if (status != 0) { |
301 | set_bit(HCI_PSCAN, &hdev->flags); | 298 | mgmt_write_scan_failed(hdev, param, status); |
302 | if (!old_pscan) | 299 | hdev->discov_timeout = 0; |
303 | mgmt_connectable(hdev->id, 1); | 300 | goto done; |
304 | } else if (old_pscan) | ||
305 | mgmt_connectable(hdev->id, 0); | ||
306 | } | 301 | } |
307 | 302 | ||
303 | old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); | ||
304 | old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); | ||
305 | |||
306 | if (param & SCAN_INQUIRY) { | ||
307 | set_bit(HCI_ISCAN, &hdev->flags); | ||
308 | if (!old_iscan) | ||
309 | mgmt_discoverable(hdev, 1); | ||
310 | if (hdev->discov_timeout > 0) { | ||
311 | int to = msecs_to_jiffies(hdev->discov_timeout * 1000); | ||
312 | queue_delayed_work(hdev->workqueue, &hdev->discov_off, | ||
313 | to); | ||
314 | } | ||
315 | } else if (old_iscan) | ||
316 | mgmt_discoverable(hdev, 0); | ||
317 | |||
318 | if (param & SCAN_PAGE) { | ||
319 | set_bit(HCI_PSCAN, &hdev->flags); | ||
320 | if (!old_pscan) | ||
321 | mgmt_connectable(hdev, 1); | ||
322 | } else if (old_pscan) | ||
323 | mgmt_connectable(hdev, 0); | ||
324 | |||
325 | done: | ||
326 | hci_dev_unlock(hdev); | ||
308 | hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); | 327 | hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); |
309 | } | 328 | } |
310 | 329 | ||
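When inquiry scan comes up and a discoverable timeout is configured, the handler above arms hdev->discov_off as delayed work on the device workqueue. The same pattern in a self-contained sketch; the demo_* names and field layout are assumptions, not the hci_dev definition:

    #include <linux/jiffies.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct demo_dev {
        struct delayed_work discov_off;
        struct workqueue_struct *wq;
        u16 discov_timeout;             /* seconds; 0 means stay discoverable */
    };

    /* Fires once the timeout expires; a real handler would clear inquiry scan. */
    static void demo_discov_off(struct work_struct *work)
    {
        struct demo_dev *d = container_of(work, struct demo_dev,
                                          discov_off.work);
        (void)d;
    }

    static void demo_init(struct demo_dev *d)
    {
        INIT_DELAYED_WORK(&d->discov_off, demo_discov_off);
    }

    /* Called when the controller confirms inquiry scan is enabled. */
    static void demo_arm_discov_timeout(struct demo_dev *d)
    {
        if (d->discov_timeout > 0)
            queue_delayed_work(d->wq, &d->discov_off,
                               msecs_to_jiffies(d->discov_timeout * 1000));
    }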
@@ -359,11 +378,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | |||
359 | 378 | ||
360 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); | 379 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); |
361 | 380 | ||
362 | if (hdev->notify) { | 381 | if (hdev->notify) |
363 | tasklet_disable(&hdev->tx_task); | ||
364 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 382 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); |
365 | tasklet_enable(&hdev->tx_task); | ||
366 | } | ||
367 | } | 383 | } |
368 | 384 | ||
369 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) | 385 | static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -390,11 +406,8 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb | |||
390 | 406 | ||
391 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); | 407 | BT_DBG("%s voice setting 0x%04x", hdev->name, setting); |
392 | 408 | ||
393 | if (hdev->notify) { | 409 | if (hdev->notify) |
394 | tasklet_disable(&hdev->tx_task); | ||
395 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 410 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); |
396 | tasklet_enable(&hdev->tx_task); | ||
397 | } | ||
398 | } | 411 | } |
399 | 412 | ||
400 | static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | 413 | static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -481,7 +494,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev) | |||
481 | 494 | ||
482 | /* CSR 1.1 dongles does not accept any bitfield so don't try to set | 495 | /* CSR 1.1 dongles does not accept any bitfield so don't try to set |
483 | * any event mask for pre 1.2 devices */ | 496 | * any event mask for pre 1.2 devices */ |
484 | if (hdev->lmp_ver <= 1) | 497 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
485 | return; | 498 | return; |
486 | 499 | ||
487 | events[4] |= 0x01; /* Flow Specification Complete */ | 500 | events[4] |= 0x01; /* Flow Specification Complete */ |
@@ -543,9 +556,12 @@ static void hci_set_le_support(struct hci_dev *hdev) | |||
543 | 556 | ||
544 | static void hci_setup(struct hci_dev *hdev) | 557 | static void hci_setup(struct hci_dev *hdev) |
545 | { | 558 | { |
559 | if (hdev->dev_type != HCI_BREDR) | ||
560 | return; | ||
561 | |||
546 | hci_setup_event_mask(hdev); | 562 | hci_setup_event_mask(hdev); |
547 | 563 | ||
548 | if (hdev->hci_ver > 1) | 564 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) |
549 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); | 565 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); |
550 | 566 | ||
551 | if (hdev->features[6] & LMP_SIMPLE_PAIR) { | 567 | if (hdev->features[6] & LMP_SIMPLE_PAIR) { |
@@ -700,6 +716,21 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, | |||
700 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); | 716 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); |
701 | } | 717 | } |
702 | 718 | ||
719 | static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, | ||
720 | struct sk_buff *skb) | ||
721 | { | ||
722 | struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; | ||
723 | |||
724 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
725 | |||
726 | if (rp->status) | ||
727 | return; | ||
728 | |||
729 | hdev->flow_ctl_mode = rp->mode; | ||
730 | |||
731 | hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status); | ||
732 | } | ||
733 | |||
703 | static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | 734 | static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) |
704 | { | 735 | { |
705 | struct hci_rp_read_buffer_size *rp = (void *) skb->data; | 736 | struct hci_rp_read_buffer_size *rp = (void *) skb->data; |
@@ -739,6 +770,28 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) | |||
739 | hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); | 770 | hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); |
740 | } | 771 | } |
741 | 772 | ||
773 | static void hci_cc_read_data_block_size(struct hci_dev *hdev, | ||
774 | struct sk_buff *skb) | ||
775 | { | ||
776 | struct hci_rp_read_data_block_size *rp = (void *) skb->data; | ||
777 | |||
778 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
779 | |||
780 | if (rp->status) | ||
781 | return; | ||
782 | |||
783 | hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); | ||
784 | hdev->block_len = __le16_to_cpu(rp->block_len); | ||
785 | hdev->num_blocks = __le16_to_cpu(rp->num_blocks); | ||
786 | |||
787 | hdev->block_cnt = hdev->num_blocks; | ||
788 | |||
789 | BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, | ||
790 | hdev->block_cnt, hdev->block_len); | ||
791 | |||
792 | hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); | ||
793 | } | ||
794 | |||
742 | static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) | 795 | static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) |
743 | { | 796 | { |
744 | __u8 status = *((__u8 *) skb->data); | 797 | __u8 status = *((__u8 *) skb->data); |
@@ -748,6 +801,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) | |||
748 | hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); | 801 | hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); |
749 | } | 802 | } |
750 | 803 | ||
804 | static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | ||
805 | struct sk_buff *skb) | ||
806 | { | ||
807 | struct hci_rp_read_local_amp_info *rp = (void *) skb->data; | ||
808 | |||
809 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
810 | |||
811 | if (rp->status) | ||
812 | return; | ||
813 | |||
814 | hdev->amp_status = rp->amp_status; | ||
815 | hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); | ||
816 | hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); | ||
817 | hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); | ||
818 | hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); | ||
819 | hdev->amp_type = rp->amp_type; | ||
820 | hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); | ||
821 | hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); | ||
822 | hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); | ||
823 | hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); | ||
824 | |||
825 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); | ||
826 | } | ||
827 | |||
751 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, | 828 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, |
752 | struct sk_buff *skb) | 829 | struct sk_buff *skb) |
753 | { | 830 | { |
@@ -804,19 +881,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
804 | 881 | ||
805 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | 882 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
806 | 883 | ||
884 | hci_dev_lock(hdev); | ||
885 | |||
807 | if (test_bit(HCI_MGMT, &hdev->flags)) | 886 | if (test_bit(HCI_MGMT, &hdev->flags)) |
808 | mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status); | 887 | mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); |
809 | 888 | ||
810 | if (rp->status != 0) | 889 | if (rp->status != 0) |
811 | return; | 890 | goto unlock; |
812 | 891 | ||
813 | cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); | 892 | cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); |
814 | if (!cp) | 893 | if (!cp) |
815 | return; | 894 | goto unlock; |
816 | 895 | ||
817 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); | 896 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); |
818 | if (conn) | 897 | if (conn) |
819 | conn->pin_length = cp->pin_len; | 898 | conn->pin_length = cp->pin_len; |
899 | |||
900 | unlock: | ||
901 | hci_dev_unlock(hdev); | ||
820 | } | 902 | } |
821 | 903 | ||
822 | static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) | 904 | static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -825,10 +907,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
825 | 907 | ||
826 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | 908 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
827 | 909 | ||
910 | hci_dev_lock(hdev); | ||
911 | |||
828 | if (test_bit(HCI_MGMT, &hdev->flags)) | 912 | if (test_bit(HCI_MGMT, &hdev->flags)) |
829 | mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr, | 913 | mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, |
830 | rp->status); | 914 | rp->status); |
915 | |||
916 | hci_dev_unlock(hdev); | ||
831 | } | 917 | } |
918 | |||
832 | static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, | 919 | static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, |
833 | struct sk_buff *skb) | 920 | struct sk_buff *skb) |
834 | { | 921 | { |
@@ -855,9 +942,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) | |||
855 | 942 | ||
856 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | 943 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
857 | 944 | ||
945 | hci_dev_lock(hdev); | ||
946 | |||
858 | if (test_bit(HCI_MGMT, &hdev->flags)) | 947 | if (test_bit(HCI_MGMT, &hdev->flags)) |
859 | mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr, | 948 | mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, |
860 | rp->status); | 949 | rp->status); |
950 | |||
951 | hci_dev_unlock(hdev); | ||
861 | } | 952 | } |
862 | 953 | ||
863 | static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, | 954 | static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, |
@@ -867,9 +958,44 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, | |||
867 | 958 | ||
868 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | 959 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
869 | 960 | ||
961 | hci_dev_lock(hdev); | ||
962 | |||
963 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
964 | mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, | ||
965 | rp->status); | ||
966 | |||
967 | hci_dev_unlock(hdev); | ||
968 | } | ||
969 | |||
970 | static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) | ||
971 | { | ||
972 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; | ||
973 | |||
974 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
975 | |||
976 | hci_dev_lock(hdev); | ||
977 | |||
978 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
979 | mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, | ||
980 | rp->status); | ||
981 | |||
982 | hci_dev_unlock(hdev); | ||
983 | } | ||
984 | |||
985 | static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, | ||
986 | struct sk_buff *skb) | ||
987 | { | ||
988 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; | ||
989 | |||
990 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | ||
991 | |||
992 | hci_dev_lock(hdev); | ||
993 | |||
870 | if (test_bit(HCI_MGMT, &hdev->flags)) | 994 | if (test_bit(HCI_MGMT, &hdev->flags)) |
871 | mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr, | 995 | mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, |
872 | rp->status); | 996 | rp->status); |
997 | |||
998 | hci_dev_unlock(hdev); | ||
873 | } | 999 | } |
874 | 1000 | ||
875 | static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, | 1001 | static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, |
@@ -879,8 +1005,17 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, | |||
879 | 1005 | ||
880 | BT_DBG("%s status 0x%x", hdev->name, rp->status); | 1006 | BT_DBG("%s status 0x%x", hdev->name, rp->status); |
881 | 1007 | ||
882 | mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash, | 1008 | hci_dev_lock(hdev); |
1009 | mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, | ||
883 | rp->randomizer, rp->status); | 1010 | rp->randomizer, rp->status); |
1011 | hci_dev_unlock(hdev); | ||
1012 | } | ||
1013 | |||
1014 | static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) | ||
1015 | { | ||
1016 | __u8 status = *((__u8 *) skb->data); | ||
1017 | |||
1018 | BT_DBG("%s status 0x%x", hdev->name, status); | ||
884 | } | 1019 | } |
885 | 1020 | ||
886 | static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, | 1021 | static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, |
@@ -898,14 +1033,28 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, | |||
898 | if (!cp) | 1033 | if (!cp) |
899 | return; | 1034 | return; |
900 | 1035 | ||
901 | if (cp->enable == 0x01) { | 1036 | switch (cp->enable) { |
902 | del_timer(&hdev->adv_timer); | 1037 | case LE_SCANNING_ENABLED: |
1038 | set_bit(HCI_LE_SCAN, &hdev->dev_flags); | ||
1039 | |||
1040 | cancel_delayed_work_sync(&hdev->adv_work); | ||
903 | 1041 | ||
904 | hci_dev_lock(hdev); | 1042 | hci_dev_lock(hdev); |
905 | hci_adv_entries_clear(hdev); | 1043 | hci_adv_entries_clear(hdev); |
906 | hci_dev_unlock(hdev); | 1044 | hci_dev_unlock(hdev); |
907 | } else if (cp->enable == 0x00) { | 1045 | break; |
908 | mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT); | 1046 | |
1047 | case LE_SCANNING_DISABLED: | ||
1048 | clear_bit(HCI_LE_SCAN, &hdev->dev_flags); | ||
1049 | |||
1050 | cancel_delayed_work_sync(&hdev->adv_work); | ||
1051 | queue_delayed_work(hdev->workqueue, &hdev->adv_work, | ||
1052 | jiffies + ADV_CLEAR_TIMEOUT); | ||
1053 | break; | ||
1054 | |||
1055 | default: | ||
1056 | BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable); | ||
1057 | break; | ||
909 | } | 1058 | } |
910 | } | 1059 | } |
911 | 1060 | ||
@@ -955,12 +1104,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | |||
955 | if (status) { | 1104 | if (status) { |
956 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); | 1105 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); |
957 | hci_conn_check_pending(hdev); | 1106 | hci_conn_check_pending(hdev); |
1107 | hci_dev_lock(hdev); | ||
1108 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
1109 | mgmt_start_discovery_failed(hdev, status); | ||
1110 | hci_dev_unlock(hdev); | ||
958 | return; | 1111 | return; |
959 | } | 1112 | } |
960 | 1113 | ||
961 | if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && | 1114 | set_bit(HCI_INQUIRY, &hdev->flags); |
962 | test_bit(HCI_MGMT, &hdev->flags)) | 1115 | |
963 | mgmt_discovering(hdev->id, 1); | 1116 | hci_dev_lock(hdev); |
1117 | mgmt_discovering(hdev, 1); | ||
1118 | hci_dev_unlock(hdev); | ||
964 | } | 1119 | } |
965 | 1120 | ||
966 | static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | 1121 | static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) |
@@ -1339,13 +1494,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
1339 | 1494 | ||
1340 | BT_DBG("%s status %d", hdev->name, status); | 1495 | BT_DBG("%s status %d", hdev->name, status); |
1341 | 1496 | ||
1342 | if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && | ||
1343 | test_bit(HCI_MGMT, &hdev->flags)) | ||
1344 | mgmt_discovering(hdev->id, 0); | ||
1345 | |||
1346 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); | 1497 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); |
1347 | 1498 | ||
1348 | hci_conn_check_pending(hdev); | 1499 | hci_conn_check_pending(hdev); |
1500 | |||
1501 | if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) | ||
1502 | return; | ||
1503 | |||
1504 | hci_dev_lock(hdev); | ||
1505 | mgmt_discovering(hdev, 0); | ||
1506 | hci_dev_unlock(hdev); | ||
1349 | } | 1507 | } |
1350 | 1508 | ||
1351 | static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1509 | static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -1361,12 +1519,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * | |||
1361 | 1519 | ||
1362 | hci_dev_lock(hdev); | 1520 | hci_dev_lock(hdev); |
1363 | 1521 | ||
1364 | if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { | ||
1365 | |||
1366 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
1367 | mgmt_discovering(hdev->id, 1); | ||
1368 | } | ||
1369 | |||
1370 | for (; num_rsp; num_rsp--, info++) { | 1522 | for (; num_rsp; num_rsp--, info++) { |
1371 | bacpy(&data.bdaddr, &info->bdaddr); | 1523 | bacpy(&data.bdaddr, &info->bdaddr); |
1372 | data.pscan_rep_mode = info->pscan_rep_mode; | 1524 | data.pscan_rep_mode = info->pscan_rep_mode; |
@@ -1377,8 +1529,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * | |||
1377 | data.rssi = 0x00; | 1529 | data.rssi = 0x00; |
1378 | data.ssp_mode = 0x00; | 1530 | data.ssp_mode = 0x00; |
1379 | hci_inquiry_cache_update(hdev, &data); | 1531 | hci_inquiry_cache_update(hdev, &data); |
1380 | mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0, | 1532 | mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, |
1381 | NULL); | 1533 | info->dev_class, 0, NULL); |
1382 | } | 1534 | } |
1383 | 1535 | ||
1384 | hci_dev_unlock(hdev); | 1536 | hci_dev_unlock(hdev); |
@@ -1412,7 +1564,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1412 | conn->state = BT_CONFIG; | 1564 | conn->state = BT_CONFIG; |
1413 | hci_conn_hold(conn); | 1565 | hci_conn_hold(conn); |
1414 | conn->disc_timeout = HCI_DISCONN_TIMEOUT; | 1566 | conn->disc_timeout = HCI_DISCONN_TIMEOUT; |
1415 | mgmt_connected(hdev->id, &ev->bdaddr, conn->type); | 1567 | mgmt_connected(hdev, &ev->bdaddr, conn->type, |
1568 | conn->dst_type); | ||
1416 | } else | 1569 | } else |
1417 | conn->state = BT_CONNECTED; | 1570 | conn->state = BT_CONNECTED; |
1418 | 1571 | ||
@@ -1434,7 +1587,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1434 | } | 1587 | } |
1435 | 1588 | ||
1436 | /* Set packet type for incoming connection */ | 1589 | /* Set packet type for incoming connection */ |
1437 | if (!conn->out && hdev->hci_ver < 3) { | 1590 | if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { |
1438 | struct hci_cp_change_conn_ptype cp; | 1591 | struct hci_cp_change_conn_ptype cp; |
1439 | cp.handle = ev->handle; | 1592 | cp.handle = ev->handle; |
1440 | cp.pkt_type = cpu_to_le16(conn->pkt_type); | 1593 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
@@ -1444,7 +1597,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1444 | } else { | 1597 | } else { |
1445 | conn->state = BT_CLOSED; | 1598 | conn->state = BT_CLOSED; |
1446 | if (conn->type == ACL_LINK) | 1599 | if (conn->type == ACL_LINK) |
1447 | mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); | 1600 | mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, |
1601 | conn->dst_type, ev->status); | ||
1448 | } | 1602 | } |
1449 | 1603 | ||
1450 | if (conn->type == ACL_LINK) | 1604 | if (conn->type == ACL_LINK) |
@@ -1531,7 +1685,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1531 | struct hci_cp_reject_conn_req cp; | 1685 | struct hci_cp_reject_conn_req cp; |
1532 | 1686 | ||
1533 | bacpy(&cp.bdaddr, &ev->bdaddr); | 1687 | bacpy(&cp.bdaddr, &ev->bdaddr); |
1534 | cp.reason = 0x0f; | 1688 | cp.reason = HCI_ERROR_REJ_BAD_ADDR; |
1535 | hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); | 1689 | hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); |
1536 | } | 1690 | } |
1537 | } | 1691 | } |
@@ -1543,24 +1697,27 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
1543 | 1697 | ||
1544 | BT_DBG("%s status %d", hdev->name, ev->status); | 1698 | BT_DBG("%s status %d", hdev->name, ev->status); |
1545 | 1699 | ||
1546 | if (ev->status) { | ||
1547 | mgmt_disconnect_failed(hdev->id); | ||
1548 | return; | ||
1549 | } | ||
1550 | |||
1551 | hci_dev_lock(hdev); | 1700 | hci_dev_lock(hdev); |
1552 | 1701 | ||
1553 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); | 1702 | conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); |
1554 | if (!conn) | 1703 | if (!conn) |
1555 | goto unlock; | 1704 | goto unlock; |
1556 | 1705 | ||
1557 | conn->state = BT_CLOSED; | 1706 | if (ev->status == 0) |
1707 | conn->state = BT_CLOSED; | ||
1558 | 1708 | ||
1559 | if (conn->type == ACL_LINK || conn->type == LE_LINK) | 1709 | if (conn->type == ACL_LINK || conn->type == LE_LINK) { |
1560 | mgmt_disconnected(hdev->id, &conn->dst); | 1710 | if (ev->status != 0) |
1711 | mgmt_disconnect_failed(hdev, &conn->dst, ev->status); | ||
1712 | else | ||
1713 | mgmt_disconnected(hdev, &conn->dst, conn->type, | ||
1714 | conn->dst_type); | ||
1715 | } | ||
1561 | 1716 | ||
1562 | hci_proto_disconn_cfm(conn, ev->reason); | 1717 | if (ev->status == 0) { |
1563 | hci_conn_del(conn); | 1718 | hci_proto_disconn_cfm(conn, ev->reason); |
1719 | hci_conn_del(conn); | ||
1720 | } | ||
1564 | 1721 | ||
1565 | unlock: | 1722 | unlock: |
1566 | hci_dev_unlock(hdev); | 1723 | hci_dev_unlock(hdev); |
@@ -1588,7 +1745,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s | |||
1588 | conn->sec_level = conn->pending_sec_level; | 1745 | conn->sec_level = conn->pending_sec_level; |
1589 | } | 1746 | } |
1590 | } else { | 1747 | } else { |
1591 | mgmt_auth_failed(hdev->id, &conn->dst, ev->status); | 1748 | mgmt_auth_failed(hdev, &conn->dst, ev->status); |
1592 | } | 1749 | } |
1593 | 1750 | ||
1594 | clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); | 1751 | clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); |
@@ -1643,7 +1800,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
1643 | hci_dev_lock(hdev); | 1800 | hci_dev_lock(hdev); |
1644 | 1801 | ||
1645 | if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags)) | 1802 | if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags)) |
1646 | mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name); | 1803 | mgmt_remote_name(hdev, &ev->bdaddr, ev->name); |
1647 | 1804 | ||
1648 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); | 1805 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); |
1649 | if (!conn) | 1806 | if (!conn) |
@@ -1894,10 +2051,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1894 | hci_cc_read_bd_addr(hdev, skb); | 2051 | hci_cc_read_bd_addr(hdev, skb); |
1895 | break; | 2052 | break; |
1896 | 2053 | ||
2054 | case HCI_OP_READ_DATA_BLOCK_SIZE: | ||
2055 | hci_cc_read_data_block_size(hdev, skb); | ||
2056 | break; | ||
2057 | |||
1897 | case HCI_OP_WRITE_CA_TIMEOUT: | 2058 | case HCI_OP_WRITE_CA_TIMEOUT: |
1898 | hci_cc_write_ca_timeout(hdev, skb); | 2059 | hci_cc_write_ca_timeout(hdev, skb); |
1899 | break; | 2060 | break; |
1900 | 2061 | ||
2062 | case HCI_OP_READ_FLOW_CONTROL_MODE: | ||
2063 | hci_cc_read_flow_control_mode(hdev, skb); | ||
2064 | break; | ||
2065 | |||
2066 | case HCI_OP_READ_LOCAL_AMP_INFO: | ||
2067 | hci_cc_read_local_amp_info(hdev, skb); | ||
2068 | break; | ||
2069 | |||
1901 | case HCI_OP_DELETE_STORED_LINK_KEY: | 2070 | case HCI_OP_DELETE_STORED_LINK_KEY: |
1902 | hci_cc_delete_stored_link_key(hdev, skb); | 2071 | hci_cc_delete_stored_link_key(hdev, skb); |
1903 | break; | 2072 | break; |
@@ -1942,6 +2111,17 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1942 | hci_cc_user_confirm_neg_reply(hdev, skb); | 2111 | hci_cc_user_confirm_neg_reply(hdev, skb); |
1943 | break; | 2112 | break; |
1944 | 2113 | ||
2114 | case HCI_OP_USER_PASSKEY_REPLY: | ||
2115 | hci_cc_user_passkey_reply(hdev, skb); | ||
2116 | break; | ||
2117 | |||
2118 | case HCI_OP_USER_PASSKEY_NEG_REPLY: | ||
2119 | hci_cc_user_passkey_neg_reply(hdev, skb); | ||
2120 | break; | ||
2121 | case HCI_OP_LE_SET_SCAN_PARAM: | ||
2122 | hci_cc_le_set_scan_param(hdev, skb); | ||
2123 | break; | ||
2124 | |||
1945 | case HCI_OP_LE_SET_SCAN_ENABLE: | 2125 | case HCI_OP_LE_SET_SCAN_ENABLE: |
1946 | hci_cc_le_set_scan_enable(hdev, skb); | 2126 | hci_cc_le_set_scan_enable(hdev, skb); |
1947 | break; | 2127 | break; |
@@ -1969,7 +2149,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk | |||
1969 | if (ev->ncmd) { | 2149 | if (ev->ncmd) { |
1970 | atomic_set(&hdev->cmd_cnt, 1); | 2150 | atomic_set(&hdev->cmd_cnt, 1); |
1971 | if (!skb_queue_empty(&hdev->cmd_q)) | 2151 | if (!skb_queue_empty(&hdev->cmd_q)) |
1972 | tasklet_schedule(&hdev->cmd_task); | 2152 | queue_work(hdev->workqueue, &hdev->cmd_work); |
1973 | } | 2153 | } |
1974 | } | 2154 | } |
1975 | 2155 | ||
@@ -2029,7 +2209,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2029 | 2209 | ||
2030 | case HCI_OP_DISCONNECT: | 2210 | case HCI_OP_DISCONNECT: |
2031 | if (ev->status != 0) | 2211 | if (ev->status != 0) |
2032 | mgmt_disconnect_failed(hdev->id); | 2212 | mgmt_disconnect_failed(hdev, NULL, ev->status); |
2033 | break; | 2213 | break; |
2034 | 2214 | ||
2035 | case HCI_OP_LE_CREATE_CONN: | 2215 | case HCI_OP_LE_CREATE_CONN: |
@@ -2051,7 +2231,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2051 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { | 2231 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { |
2052 | atomic_set(&hdev->cmd_cnt, 1); | 2232 | atomic_set(&hdev->cmd_cnt, 1); |
2053 | if (!skb_queue_empty(&hdev->cmd_q)) | 2233 | if (!skb_queue_empty(&hdev->cmd_q)) |
2054 | tasklet_schedule(&hdev->cmd_task); | 2234 | queue_work(hdev->workqueue, &hdev->cmd_work); |
2055 | } | 2235 | } |
2056 | } | 2236 | } |
2057 | 2237 | ||
@@ -2084,56 +2264,68 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb | |||
2084 | static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2264 | static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2085 | { | 2265 | { |
2086 | struct hci_ev_num_comp_pkts *ev = (void *) skb->data; | 2266 | struct hci_ev_num_comp_pkts *ev = (void *) skb->data; |
2087 | __le16 *ptr; | ||
2088 | int i; | 2267 | int i; |
2089 | 2268 | ||
2090 | skb_pull(skb, sizeof(*ev)); | 2269 | skb_pull(skb, sizeof(*ev)); |
2091 | 2270 | ||
2092 | BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); | 2271 | BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); |
2093 | 2272 | ||
2273 | if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { | ||
2274 | BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); | ||
2275 | return; | ||
2276 | } | ||
2277 | |||
2094 | if (skb->len < ev->num_hndl * 4) { | 2278 | if (skb->len < ev->num_hndl * 4) { |
2095 | BT_DBG("%s bad parameters", hdev->name); | 2279 | BT_DBG("%s bad parameters", hdev->name); |
2096 | return; | 2280 | return; |
2097 | } | 2281 | } |
2098 | 2282 | ||
2099 | tasklet_disable(&hdev->tx_task); | 2283 | for (i = 0; i < ev->num_hndl; i++) { |
2100 | 2284 | struct hci_comp_pkts_info *info = &ev->handles[i]; | |
2101 | for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { | ||
2102 | struct hci_conn *conn; | 2285 | struct hci_conn *conn; |
2103 | __u16 handle, count; | 2286 | __u16 handle, count; |
2104 | 2287 | ||
2105 | handle = get_unaligned_le16(ptr++); | 2288 | handle = __le16_to_cpu(info->handle); |
2106 | count = get_unaligned_le16(ptr++); | 2289 | count = __le16_to_cpu(info->count); |
2107 | 2290 | ||
2108 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 2291 | conn = hci_conn_hash_lookup_handle(hdev, handle); |
2109 | if (conn) { | 2292 | if (!conn) |
2110 | conn->sent -= count; | 2293 | continue; |
2111 | 2294 | ||
2112 | if (conn->type == ACL_LINK) { | 2295 | conn->sent -= count; |
2296 | |||
2297 | switch (conn->type) { | ||
2298 | case ACL_LINK: | ||
2299 | hdev->acl_cnt += count; | ||
2300 | if (hdev->acl_cnt > hdev->acl_pkts) | ||
2301 | hdev->acl_cnt = hdev->acl_pkts; | ||
2302 | break; | ||
2303 | |||
2304 | case LE_LINK: | ||
2305 | if (hdev->le_pkts) { | ||
2306 | hdev->le_cnt += count; | ||
2307 | if (hdev->le_cnt > hdev->le_pkts) | ||
2308 | hdev->le_cnt = hdev->le_pkts; | ||
2309 | } else { | ||
2113 | hdev->acl_cnt += count; | 2310 | hdev->acl_cnt += count; |
2114 | if (hdev->acl_cnt > hdev->acl_pkts) | 2311 | if (hdev->acl_cnt > hdev->acl_pkts) |
2115 | hdev->acl_cnt = hdev->acl_pkts; | 2312 | hdev->acl_cnt = hdev->acl_pkts; |
2116 | } else if (conn->type == LE_LINK) { | ||
2117 | if (hdev->le_pkts) { | ||
2118 | hdev->le_cnt += count; | ||
2119 | if (hdev->le_cnt > hdev->le_pkts) | ||
2120 | hdev->le_cnt = hdev->le_pkts; | ||
2121 | } else { | ||
2122 | hdev->acl_cnt += count; | ||
2123 | if (hdev->acl_cnt > hdev->acl_pkts) | ||
2124 | hdev->acl_cnt = hdev->acl_pkts; | ||
2125 | } | ||
2126 | } else { | ||
2127 | hdev->sco_cnt += count; | ||
2128 | if (hdev->sco_cnt > hdev->sco_pkts) | ||
2129 | hdev->sco_cnt = hdev->sco_pkts; | ||
2130 | } | 2313 | } |
2314 | break; | ||
2315 | |||
2316 | case SCO_LINK: | ||
2317 | hdev->sco_cnt += count; | ||
2318 | if (hdev->sco_cnt > hdev->sco_pkts) | ||
2319 | hdev->sco_cnt = hdev->sco_pkts; | ||
2320 | break; | ||
2321 | |||
2322 | default: | ||
2323 | BT_ERR("Unknown type %d conn %p", conn->type, conn); | ||
2324 | break; | ||
2131 | } | 2325 | } |
2132 | } | 2326 | } |
2133 | 2327 | ||
2134 | tasklet_schedule(&hdev->tx_task); | 2328 | queue_work(hdev->workqueue, &hdev->tx_work); |
2135 | |||
2136 | tasklet_enable(&hdev->tx_task); | ||
2137 | } | 2329 | } |
2138 | 2330 | ||
2139 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2331 | static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -2194,7 +2386,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2194 | else | 2386 | else |
2195 | secure = 0; | 2387 | secure = 0; |
2196 | 2388 | ||
2197 | mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); | 2389 | mgmt_pin_code_request(hdev, &ev->bdaddr, secure); |
2198 | } | 2390 | } |
2199 | 2391 | ||
2200 | unlock: | 2392 | unlock: |
@@ -2363,12 +2555,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
2363 | 2555 | ||
2364 | hci_dev_lock(hdev); | 2556 | hci_dev_lock(hdev); |
2365 | 2557 | ||
2366 | if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { | ||
2367 | |||
2368 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
2369 | mgmt_discovering(hdev->id, 1); | ||
2370 | } | ||
2371 | |||
2372 | if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { | 2558 | if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { |
2373 | struct inquiry_info_with_rssi_and_pscan_mode *info; | 2559 | struct inquiry_info_with_rssi_and_pscan_mode *info; |
2374 | info = (void *) (skb->data + 1); | 2560 | info = (void *) (skb->data + 1); |
@@ -2383,7 +2569,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
2383 | data.rssi = info->rssi; | 2569 | data.rssi = info->rssi; |
2384 | data.ssp_mode = 0x00; | 2570 | data.ssp_mode = 0x00; |
2385 | hci_inquiry_cache_update(hdev, &data); | 2571 | hci_inquiry_cache_update(hdev, &data); |
2386 | mgmt_device_found(hdev->id, &info->bdaddr, | 2572 | mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, |
2387 | info->dev_class, info->rssi, | 2573 | info->dev_class, info->rssi, |
2388 | NULL); | 2574 | NULL); |
2389 | } | 2575 | } |
@@ -2400,7 +2586,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct | |||
2400 | data.rssi = info->rssi; | 2586 | data.rssi = info->rssi; |
2401 | data.ssp_mode = 0x00; | 2587 | data.ssp_mode = 0x00; |
2402 | hci_inquiry_cache_update(hdev, &data); | 2588 | hci_inquiry_cache_update(hdev, &data); |
2403 | mgmt_device_found(hdev->id, &info->bdaddr, | 2589 | mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, |
2404 | info->dev_class, info->rssi, | 2590 | info->dev_class, info->rssi, |
2405 | NULL); | 2591 | NULL); |
2406 | } | 2592 | } |
@@ -2531,12 +2717,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
2531 | if (!num_rsp) | 2717 | if (!num_rsp) |
2532 | return; | 2718 | return; |
2533 | 2719 | ||
2534 | if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { | ||
2535 | |||
2536 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
2537 | mgmt_discovering(hdev->id, 1); | ||
2538 | } | ||
2539 | |||
2540 | hci_dev_lock(hdev); | 2720 | hci_dev_lock(hdev); |
2541 | 2721 | ||
2542 | for (; num_rsp; num_rsp--, info++) { | 2722 | for (; num_rsp; num_rsp--, info++) { |
@@ -2549,8 +2729,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct | |||
2549 | data.rssi = info->rssi; | 2729 | data.rssi = info->rssi; |
2550 | data.ssp_mode = 0x01; | 2730 | data.ssp_mode = 0x01; |
2551 | hci_inquiry_cache_update(hdev, &data); | 2731 | hci_inquiry_cache_update(hdev, &data); |
2552 | mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, | 2732 | mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, |
2553 | info->rssi, info->data); | 2733 | info->dev_class, info->rssi, info->data); |
2554 | } | 2734 | } |
2555 | 2735 | ||
2556 | hci_dev_unlock(hdev); | 2736 | hci_dev_unlock(hdev); |
@@ -2614,7 +2794,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff | |||
2614 | struct hci_cp_io_capability_neg_reply cp; | 2794 | struct hci_cp_io_capability_neg_reply cp; |
2615 | 2795 | ||
2616 | bacpy(&cp.bdaddr, &ev->bdaddr); | 2796 | bacpy(&cp.bdaddr, &ev->bdaddr); |
2617 | cp.reason = 0x18; /* Pairing not allowed */ | 2797 | cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; |
2618 | 2798 | ||
2619 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, | 2799 | hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, |
2620 | sizeof(cp), &cp); | 2800 | sizeof(cp), &cp); |
@@ -2706,13 +2886,28 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, | |||
2706 | } | 2886 | } |
2707 | 2887 | ||
2708 | confirm: | 2888 | confirm: |
2709 | mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey, | 2889 | mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey, |
2710 | confirm_hint); | 2890 | confirm_hint); |
2711 | 2891 | ||
2712 | unlock: | 2892 | unlock: |
2713 | hci_dev_unlock(hdev); | 2893 | hci_dev_unlock(hdev); |
2714 | } | 2894 | } |
2715 | 2895 | ||
2896 | static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, | ||
2897 | struct sk_buff *skb) | ||
2898 | { | ||
2899 | struct hci_ev_user_passkey_req *ev = (void *) skb->data; | ||
2900 | |||
2901 | BT_DBG("%s", hdev->name); | ||
2902 | |||
2903 | hci_dev_lock(hdev); | ||
2904 | |||
2905 | if (test_bit(HCI_MGMT, &hdev->flags)) | ||
2906 | mgmt_user_passkey_request(hdev, &ev->bdaddr); | ||
2907 | |||
2908 | hci_dev_unlock(hdev); | ||
2909 | } | ||
2910 | |||
2716 | static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2911 | static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2717 | { | 2912 | { |
2718 | struct hci_ev_simple_pair_complete *ev = (void *) skb->data; | 2913 | struct hci_ev_simple_pair_complete *ev = (void *) skb->data; |
@@ -2732,7 +2927,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_ | |||
2732 | * event gets always produced as initiator and is also mapped to | 2927 | * event gets always produced as initiator and is also mapped to |
2733 | * the mgmt_auth_failed event */ | 2928 | * the mgmt_auth_failed event */ |
2734 | if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) | 2929 | if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) |
2735 | mgmt_auth_failed(hdev->id, &conn->dst, ev->status); | 2930 | mgmt_auth_failed(hdev, &conn->dst, ev->status); |
2736 | 2931 | ||
2737 | hci_conn_put(conn); | 2932 | hci_conn_put(conn); |
2738 | 2933 | ||
@@ -2813,14 +3008,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff | |||
2813 | } | 3008 | } |
2814 | 3009 | ||
2815 | if (ev->status) { | 3010 | if (ev->status) { |
2816 | mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); | 3011 | mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, |
3012 | conn->dst_type, ev->status); | ||
2817 | hci_proto_connect_cfm(conn, ev->status); | 3013 | hci_proto_connect_cfm(conn, ev->status); |
2818 | conn->state = BT_CLOSED; | 3014 | conn->state = BT_CLOSED; |
2819 | hci_conn_del(conn); | 3015 | hci_conn_del(conn); |
2820 | goto unlock; | 3016 | goto unlock; |
2821 | } | 3017 | } |
2822 | 3018 | ||
2823 | mgmt_connected(hdev->id, &ev->bdaddr, conn->type); | 3019 | mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type); |
2824 | 3020 | ||
2825 | conn->sec_level = BT_SECURITY_LOW; | 3021 | conn->sec_level = BT_SECURITY_LOW; |
2826 | conn->handle = __le16_to_cpu(ev->handle); | 3022 | conn->handle = __le16_to_cpu(ev->handle); |
@@ -3051,6 +3247,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
3051 | hci_user_confirm_request_evt(hdev, skb); | 3247 | hci_user_confirm_request_evt(hdev, skb); |
3052 | break; | 3248 | break; |
3053 | 3249 | ||
3250 | case HCI_EV_USER_PASSKEY_REQUEST: | ||
3251 | hci_user_passkey_request_evt(hdev, skb); | ||
3252 | break; | ||
3253 | |||
3054 | case HCI_EV_SIMPLE_PAIR_COMPLETE: | 3254 | case HCI_EV_SIMPLE_PAIR_COMPLETE: |
3055 | hci_simple_pair_complete_evt(hdev, skb); | 3255 | hci_simple_pair_complete_evt(hdev, skb); |
3056 | break; | 3256 | break; |
@@ -3104,5 +3304,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) | |||
3104 | kfree_skb(skb); | 3304 | kfree_skb(skb); |
3105 | } | 3305 | } |
3106 | 3306 | ||
3107 | module_param(enable_le, bool, 0444); | 3307 | module_param(enable_le, bool, 0644); |
3108 | MODULE_PARM_DESC(enable_le, "Enable LE support"); | 3308 | MODULE_PARM_DESC(enable_le, "Enable LE support"); |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f6afe3d76a66..6d94616af312 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include <net/bluetooth/bluetooth.h> | 49 | #include <net/bluetooth/bluetooth.h> |
50 | #include <net/bluetooth/hci_core.h> | 50 | #include <net/bluetooth/hci_core.h> |
51 | 51 | ||
52 | static int enable_mgmt; | 52 | static bool enable_mgmt; |
53 | 53 | ||
54 | /* ----- HCI socket interface ----- */ | 54 | /* ----- HCI socket interface ----- */ |
55 | 55 | ||
@@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) | |||
188 | if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) | 188 | if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) |
189 | return -EFAULT; | 189 | return -EFAULT; |
190 | 190 | ||
191 | hci_dev_lock_bh(hdev); | 191 | hci_dev_lock(hdev); |
192 | 192 | ||
193 | err = hci_blacklist_add(hdev, &bdaddr); | 193 | err = hci_blacklist_add(hdev, &bdaddr); |
194 | 194 | ||
195 | hci_dev_unlock_bh(hdev); | 195 | hci_dev_unlock(hdev); |
196 | 196 | ||
197 | return err; | 197 | return err; |
198 | } | 198 | } |
@@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) | |||
205 | if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) | 205 | if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) |
206 | return -EFAULT; | 206 | return -EFAULT; |
207 | 207 | ||
208 | hci_dev_lock_bh(hdev); | 208 | hci_dev_lock(hdev); |
209 | 209 | ||
210 | err = hci_blacklist_del(hdev, &bdaddr); | 210 | err = hci_blacklist_del(hdev, &bdaddr); |
211 | 211 | ||
212 | hci_dev_unlock_bh(hdev); | 212 | hci_dev_unlock(hdev); |
213 | 213 | ||
214 | return err; | 214 | return err; |
215 | } | 215 | } |
@@ -343,8 +343,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le | |||
343 | if (haddr.hci_channel > HCI_CHANNEL_CONTROL) | 343 | if (haddr.hci_channel > HCI_CHANNEL_CONTROL) |
344 | return -EINVAL; | 344 | return -EINVAL; |
345 | 345 | ||
346 | if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt) | 346 | if (haddr.hci_channel == HCI_CHANNEL_CONTROL) { |
347 | return -EINVAL; | 347 | if (!enable_mgmt) |
348 | return -EINVAL; | ||
349 | set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags); | ||
350 | } | ||
348 | 351 | ||
349 | lock_sock(sk); | 352 | lock_sock(sk); |
350 | 353 | ||
@@ -535,10 +538,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
535 | 538 | ||
536 | if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { | 539 | if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { |
537 | skb_queue_tail(&hdev->raw_q, skb); | 540 | skb_queue_tail(&hdev->raw_q, skb); |
538 | tasklet_schedule(&hdev->tx_task); | 541 | queue_work(hdev->workqueue, &hdev->tx_work); |
539 | } else { | 542 | } else { |
540 | skb_queue_tail(&hdev->cmd_q, skb); | 543 | skb_queue_tail(&hdev->cmd_q, skb); |
541 | tasklet_schedule(&hdev->cmd_task); | 544 | queue_work(hdev->workqueue, &hdev->cmd_work); |
542 | } | 545 | } |
543 | } else { | 546 | } else { |
544 | if (!capable(CAP_NET_RAW)) { | 547 | if (!capable(CAP_NET_RAW)) { |
@@ -547,7 +550,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
547 | } | 550 | } |
548 | 551 | ||
549 | skb_queue_tail(&hdev->raw_q, skb); | 552 | skb_queue_tail(&hdev->raw_q, skb); |
550 | tasklet_schedule(&hdev->tx_task); | 553 | queue_work(hdev->workqueue, &hdev->tx_work); |
551 | } | 554 | } |
552 | 555 | ||
553 | err = len; | 556 | err = len; |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 661b461cf0b0..521095614235 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -89,11 +89,35 @@ static struct device_type bt_link = { | |||
89 | .release = bt_link_release, | 89 | .release = bt_link_release, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static void add_conn(struct work_struct *work) | 92 | /* |
93 | * The rfcomm tty device will possibly retain even when conn | ||
94 | * is down, and sysfs doesn't support move zombie device, | ||
95 | * so we should move the device before conn device is destroyed. | ||
96 | */ | ||
97 | static int __match_tty(struct device *dev, void *data) | ||
98 | { | ||
99 | return !strncmp(dev_name(dev), "rfcomm", 6); | ||
100 | } | ||
101 | |||
102 | void hci_conn_init_sysfs(struct hci_conn *conn) | ||
103 | { | ||
104 | struct hci_dev *hdev = conn->hdev; | ||
105 | |||
106 | BT_DBG("conn %p", conn); | ||
107 | |||
108 | conn->dev.type = &bt_link; | ||
109 | conn->dev.class = bt_class; | ||
110 | conn->dev.parent = &hdev->dev; | ||
111 | |||
112 | device_initialize(&conn->dev); | ||
113 | } | ||
114 | |||
115 | void hci_conn_add_sysfs(struct hci_conn *conn) | ||
93 | { | 116 | { |
94 | struct hci_conn *conn = container_of(work, struct hci_conn, work_add); | ||
95 | struct hci_dev *hdev = conn->hdev; | 117 | struct hci_dev *hdev = conn->hdev; |
96 | 118 | ||
119 | BT_DBG("conn %p", conn); | ||
120 | |||
97 | dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); | 121 | dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); |
98 | 122 | ||
99 | dev_set_drvdata(&conn->dev, conn); | 123 | dev_set_drvdata(&conn->dev, conn); |
@@ -106,19 +130,8 @@ static void add_conn(struct work_struct *work) | |||
106 | hci_dev_hold(hdev); | 130 | hci_dev_hold(hdev); |
107 | } | 131 | } |
108 | 132 | ||
109 | /* | 133 | void hci_conn_del_sysfs(struct hci_conn *conn) |
110 | * The rfcomm tty device will possibly retain even when conn | ||
111 | * is down, and sysfs doesn't support move zombie device, | ||
112 | * so we should move the device before conn device is destroyed. | ||
113 | */ | ||
114 | static int __match_tty(struct device *dev, void *data) | ||
115 | { | ||
116 | return !strncmp(dev_name(dev), "rfcomm", 6); | ||
117 | } | ||
118 | |||
119 | static void del_conn(struct work_struct *work) | ||
120 | { | 134 | { |
121 | struct hci_conn *conn = container_of(work, struct hci_conn, work_del); | ||
122 | struct hci_dev *hdev = conn->hdev; | 135 | struct hci_dev *hdev = conn->hdev; |
123 | 136 | ||
124 | if (!device_is_registered(&conn->dev)) | 137 | if (!device_is_registered(&conn->dev)) |
@@ -140,36 +153,6 @@ static void del_conn(struct work_struct *work) | |||
140 | hci_dev_put(hdev); | 153 | hci_dev_put(hdev); |
141 | } | 154 | } |
142 | 155 | ||
143 | void hci_conn_init_sysfs(struct hci_conn *conn) | ||
144 | { | ||
145 | struct hci_dev *hdev = conn->hdev; | ||
146 | |||
147 | BT_DBG("conn %p", conn); | ||
148 | |||
149 | conn->dev.type = &bt_link; | ||
150 | conn->dev.class = bt_class; | ||
151 | conn->dev.parent = &hdev->dev; | ||
152 | |||
153 | device_initialize(&conn->dev); | ||
154 | |||
155 | INIT_WORK(&conn->work_add, add_conn); | ||
156 | INIT_WORK(&conn->work_del, del_conn); | ||
157 | } | ||
158 | |||
159 | void hci_conn_add_sysfs(struct hci_conn *conn) | ||
160 | { | ||
161 | BT_DBG("conn %p", conn); | ||
162 | |||
163 | queue_work(conn->hdev->workqueue, &conn->work_add); | ||
164 | } | ||
165 | |||
166 | void hci_conn_del_sysfs(struct hci_conn *conn) | ||
167 | { | ||
168 | BT_DBG("conn %p", conn); | ||
169 | |||
170 | queue_work(conn->hdev->workqueue, &conn->work_del); | ||
171 | } | ||
172 | |||
173 | static inline char *host_bustostr(int bus) | 156 | static inline char *host_bustostr(int bus) |
174 | { | 157 | { |
175 | switch (bus) { | 158 | switch (bus) { |
@@ -403,7 +386,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p) | |||
403 | struct inquiry_cache *cache = &hdev->inq_cache; | 386 | struct inquiry_cache *cache = &hdev->inq_cache; |
404 | struct inquiry_entry *e; | 387 | struct inquiry_entry *e; |
405 | 388 | ||
406 | hci_dev_lock_bh(hdev); | 389 | hci_dev_lock(hdev); |
407 | 390 | ||
408 | for (e = cache->list; e; e = e->next) { | 391 | for (e = cache->list; e; e = e->next) { |
409 | struct inquiry_data *data = &e->data; | 392 | struct inquiry_data *data = &e->data; |
@@ -416,7 +399,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p) | |||
416 | data->rssi, data->ssp_mode, e->timestamp); | 399 | data->rssi, data->ssp_mode, e->timestamp); |
417 | } | 400 | } |
418 | 401 | ||
419 | hci_dev_unlock_bh(hdev); | 402 | hci_dev_unlock(hdev); |
420 | 403 | ||
421 | return 0; | 404 | return 0; |
422 | } | 405 | } |
@@ -436,19 +419,14 @@ static const struct file_operations inquiry_cache_fops = { | |||
436 | static int blacklist_show(struct seq_file *f, void *p) | 419 | static int blacklist_show(struct seq_file *f, void *p) |
437 | { | 420 | { |
438 | struct hci_dev *hdev = f->private; | 421 | struct hci_dev *hdev = f->private; |
439 | struct list_head *l; | 422 | struct bdaddr_list *b; |
440 | 423 | ||
441 | hci_dev_lock_bh(hdev); | 424 | hci_dev_lock(hdev); |
442 | |||
443 | list_for_each(l, &hdev->blacklist) { | ||
444 | struct bdaddr_list *b; | ||
445 | |||
446 | b = list_entry(l, struct bdaddr_list, list); | ||
447 | 425 | ||
426 | list_for_each_entry(b, &hdev->blacklist, list) | ||
448 | seq_printf(f, "%s\n", batostr(&b->bdaddr)); | 427 | seq_printf(f, "%s\n", batostr(&b->bdaddr)); |
449 | } | ||
450 | 428 | ||
451 | hci_dev_unlock_bh(hdev); | 429 | hci_dev_unlock(hdev); |
452 | 430 | ||
453 | return 0; | 431 | return 0; |
454 | } | 432 | } |
@@ -485,19 +463,14 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) | |||
485 | static int uuids_show(struct seq_file *f, void *p) | 463 | static int uuids_show(struct seq_file *f, void *p) |
486 | { | 464 | { |
487 | struct hci_dev *hdev = f->private; | 465 | struct hci_dev *hdev = f->private; |
488 | struct list_head *l; | 466 | struct bt_uuid *uuid; |
489 | |||
490 | hci_dev_lock_bh(hdev); | ||
491 | |||
492 | list_for_each(l, &hdev->uuids) { | ||
493 | struct bt_uuid *uuid; | ||
494 | 467 | ||
495 | uuid = list_entry(l, struct bt_uuid, list); | 468 | hci_dev_lock(hdev); |
496 | 469 | ||
470 | list_for_each_entry(uuid, &hdev->uuids, list) | ||
497 | print_bt_uuid(f, uuid->uuid); | 471 | print_bt_uuid(f, uuid->uuid); |
498 | } | ||
499 | 472 | ||
500 | hci_dev_unlock_bh(hdev); | 473 | hci_dev_unlock(hdev); |
501 | 474 | ||
502 | return 0; | 475 | return 0; |
503 | } | 476 | } |
@@ -518,11 +491,11 @@ static int auto_accept_delay_set(void *data, u64 val) | |||
518 | { | 491 | { |
519 | struct hci_dev *hdev = data; | 492 | struct hci_dev *hdev = data; |
520 | 493 | ||
521 | hci_dev_lock_bh(hdev); | 494 | hci_dev_lock(hdev); |
522 | 495 | ||
523 | hdev->auto_accept_delay = val; | 496 | hdev->auto_accept_delay = val; |
524 | 497 | ||
525 | hci_dev_unlock_bh(hdev); | 498 | hci_dev_unlock(hdev); |
526 | 499 | ||
527 | return 0; | 500 | return 0; |
528 | } | 501 | } |
@@ -531,11 +504,11 @@ static int auto_accept_delay_get(void *data, u64 *val) | |||
531 | { | 504 | { |
532 | struct hci_dev *hdev = data; | 505 | struct hci_dev *hdev = data; |
533 | 506 | ||
534 | hci_dev_lock_bh(hdev); | 507 | hci_dev_lock(hdev); |
535 | 508 | ||
536 | *val = hdev->auto_accept_delay; | 509 | *val = hdev->auto_accept_delay; |
537 | 510 | ||
538 | hci_dev_unlock_bh(hdev); | 511 | hci_dev_unlock(hdev); |
539 | 512 | ||
540 | return 0; | 513 | return 0; |
541 | } | 514 | } |
@@ -543,22 +516,28 @@ static int auto_accept_delay_get(void *data, u64 *val) | |||
543 | DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, | 516 | DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, |
544 | auto_accept_delay_set, "%llu\n"); | 517 | auto_accept_delay_set, "%llu\n"); |
545 | 518 | ||
546 | int hci_register_sysfs(struct hci_dev *hdev) | 519 | void hci_init_sysfs(struct hci_dev *hdev) |
520 | { | ||
521 | struct device *dev = &hdev->dev; | ||
522 | |||
523 | dev->type = &bt_host; | ||
524 | dev->class = bt_class; | ||
525 | |||
526 | dev_set_drvdata(dev, hdev); | ||
527 | device_initialize(dev); | ||
528 | } | ||
529 | |||
530 | int hci_add_sysfs(struct hci_dev *hdev) | ||
547 | { | 531 | { |
548 | struct device *dev = &hdev->dev; | 532 | struct device *dev = &hdev->dev; |
549 | int err; | 533 | int err; |
550 | 534 | ||
551 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | 535 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
552 | 536 | ||
553 | dev->type = &bt_host; | ||
554 | dev->class = bt_class; | ||
555 | dev->parent = hdev->parent; | 537 | dev->parent = hdev->parent; |
556 | |||
557 | dev_set_name(dev, "%s", hdev->name); | 538 | dev_set_name(dev, "%s", hdev->name); |
558 | 539 | ||
559 | dev_set_drvdata(dev, hdev); | 540 | err = device_add(dev); |
560 | |||
561 | err = device_register(dev); | ||
562 | if (err < 0) | 541 | if (err < 0) |
563 | return err; | 542 | return err; |
564 | 543 | ||
@@ -582,7 +561,7 @@ int hci_register_sysfs(struct hci_dev *hdev) | |||
582 | return 0; | 561 | return 0; |
583 | } | 562 | } |
584 | 563 | ||
585 | void hci_unregister_sysfs(struct hci_dev *hdev) | 564 | void hci_del_sysfs(struct hci_dev *hdev) |
586 | { | 565 | { |
587 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | 566 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
588 | 567 | ||
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 86a91543172a..4deaca78e91e 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config BT_HIDP | 1 | config BT_HIDP |
2 | tristate "HIDP protocol support" | 2 | tristate "HIDP protocol support" |
3 | depends on BT && BT_L2CAP && INPUT && HID_SUPPORT | 3 | depends on BT && INPUT && HID_SUPPORT |
4 | select HID | 4 | select HID |
5 | help | 5 | help |
6 | HIDP (Human Interface Device Protocol) is a transport layer | 6 | HIDP (Human Interface Device Protocol) is a transport layer |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 075a3e920caf..d478be11d562 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; | |||
81 | static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) | 81 | static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) |
82 | { | 82 | { |
83 | struct hidp_session *session; | 83 | struct hidp_session *session; |
84 | struct list_head *p; | ||
85 | 84 | ||
86 | BT_DBG(""); | 85 | BT_DBG(""); |
87 | 86 | ||
88 | list_for_each(p, &hidp_session_list) { | 87 | list_for_each_entry(session, &hidp_session_list, list) { |
89 | session = list_entry(p, struct hidp_session, list); | ||
90 | if (!bacmp(bdaddr, &session->bdaddr)) | 88 | if (!bacmp(bdaddr, &session->bdaddr)) |
91 | return session; | 89 | return session; |
92 | } | 90 | } |
91 | |||
93 | return NULL; | 92 | return NULL; |
94 | } | 93 | } |
95 | 94 | ||
96 | static void __hidp_link_session(struct hidp_session *session) | 95 | static void __hidp_link_session(struct hidp_session *session) |
97 | { | 96 | { |
98 | __module_get(THIS_MODULE); | ||
99 | list_add(&session->list, &hidp_session_list); | 97 | list_add(&session->list, &hidp_session_list); |
100 | |||
101 | hci_conn_hold_device(session->conn); | ||
102 | } | 98 | } |
103 | 99 | ||
104 | static void __hidp_unlink_session(struct hidp_session *session) | 100 | static void __hidp_unlink_session(struct hidp_session *session) |
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session) | |||
106 | hci_conn_put_device(session->conn); | 102 | hci_conn_put_device(session->conn); |
107 | 103 | ||
108 | list_del(&session->list); | 104 | list_del(&session->list); |
109 | module_put(THIS_MODULE); | ||
110 | } | 105 | } |
111 | 106 | ||
112 | static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) | 107 | static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) |
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, | |||
255 | 250 | ||
256 | BT_DBG("session %p data %p size %d", session, data, size); | 251 | BT_DBG("session %p data %p size %d", session, data, size); |
257 | 252 | ||
253 | if (atomic_read(&session->terminate)) | ||
254 | return -EIO; | ||
255 | |||
258 | skb = alloc_skb(size + 1, GFP_ATOMIC); | 256 | skb = alloc_skb(size + 1, GFP_ATOMIC); |
259 | if (!skb) { | 257 | if (!skb) { |
260 | BT_ERR("Can't allocate memory for new frame"); | 258 | BT_ERR("Can't allocate memory for new frame"); |
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid, | |||
329 | struct sk_buff *skb; | 327 | struct sk_buff *skb; |
330 | size_t len; | 328 | size_t len; |
331 | int numbered_reports = hid->report_enum[report_type].numbered; | 329 | int numbered_reports = hid->report_enum[report_type].numbered; |
330 | int ret; | ||
332 | 331 | ||
333 | switch (report_type) { | 332 | switch (report_type) { |
334 | case HID_FEATURE_REPORT: | 333 | case HID_FEATURE_REPORT: |
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid, | |||
352 | session->waiting_report_number = numbered_reports ? report_number : -1; | 351 | session->waiting_report_number = numbered_reports ? report_number : -1; |
353 | set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); | 352 | set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); |
354 | data[0] = report_number; | 353 | data[0] = report_number; |
355 | if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1)) | 354 | ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1); |
356 | goto err_eio; | 355 | if (ret) |
356 | goto err; | ||
357 | 357 | ||
358 | /* Wait for the return of the report. The returned report | 358 | /* Wait for the return of the report. The returned report |
359 | gets put in session->report_return. */ | 359 | gets put in session->report_return. */ |
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid, | |||
365 | 5*HZ); | 365 | 5*HZ); |
366 | if (res == 0) { | 366 | if (res == 0) { |
367 | /* timeout */ | 367 | /* timeout */ |
368 | goto err_eio; | 368 | ret = -EIO; |
369 | goto err; | ||
369 | } | 370 | } |
370 | if (res < 0) { | 371 | if (res < 0) { |
371 | /* signal */ | 372 | /* signal */ |
372 | goto err_restartsys; | 373 | ret = -ERESTARTSYS; |
374 | goto err; | ||
373 | } | 375 | } |
374 | } | 376 | } |
375 | 377 | ||
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid, | |||
390 | 392 | ||
391 | return len; | 393 | return len; |
392 | 394 | ||
393 | err_restartsys: | 395 | err: |
394 | clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); | ||
395 | mutex_unlock(&session->report_mutex); | ||
396 | return -ERESTARTSYS; | ||
397 | err_eio: | ||
398 | clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); | 396 | clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); |
399 | mutex_unlock(&session->report_mutex); | 397 | mutex_unlock(&session->report_mutex); |
400 | return -EIO; | 398 | return ret; |
401 | } | 399 | } |
402 | 400 | ||
403 | static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, | 401 | static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, |
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s | |||
422 | 420 | ||
423 | /* Set up our wait, and send the report request to the device. */ | 421 | /* Set up our wait, and send the report request to the device. */ |
424 | set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); | 422 | set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); |
425 | if (hidp_send_ctrl_message(hid->driver_data, report_type, | 423 | ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, |
426 | data, count)) { | 424 | count); |
427 | ret = -ENOMEM; | 425 | if (ret) |
428 | goto err; | 426 | goto err; |
429 | } | ||
430 | 427 | ||
431 | /* Wait for the ACK from the device. */ | 428 | /* Wait for the ACK from the device. */ |
432 | while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { | 429 | while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { |
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session, | |||
496 | case HIDP_HSHK_ERR_INVALID_REPORT_ID: | 493 | case HIDP_HSHK_ERR_INVALID_REPORT_ID: |
497 | case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: | 494 | case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: |
498 | case HIDP_HSHK_ERR_INVALID_PARAMETER: | 495 | case HIDP_HSHK_ERR_INVALID_PARAMETER: |
499 | if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) { | 496 | if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) |
500 | clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); | ||
501 | wake_up_interruptible(&session->report_queue); | 497 | wake_up_interruptible(&session->report_queue); |
502 | } | 498 | |
503 | /* FIXME: Call into SET_ GET_ handlers here */ | 499 | /* FIXME: Call into SET_ GET_ handlers here */ |
504 | break; | 500 | break; |
505 | 501 | ||
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session, | |||
520 | } | 516 | } |
521 | 517 | ||
522 | /* Wake up the waiting thread. */ | 518 | /* Wake up the waiting thread. */ |
523 | if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { | 519 | if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) |
524 | clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); | ||
525 | wake_up_interruptible(&session->report_queue); | 520 | wake_up_interruptible(&session->report_queue); |
526 | } | ||
527 | } | 521 | } |
528 | 522 | ||
529 | static void hidp_process_hid_control(struct hidp_session *session, | 523 | static void hidp_process_hid_control(struct hidp_session *session, |
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) | |||
663 | return kernel_sendmsg(sock, &msg, &iv, 1, len); | 657 | return kernel_sendmsg(sock, &msg, &iv, 1, len); |
664 | } | 658 | } |
665 | 659 | ||
666 | static void hidp_process_transmit(struct hidp_session *session) | 660 | static void hidp_process_intr_transmit(struct hidp_session *session) |
667 | { | 661 | { |
668 | struct sk_buff *skb; | 662 | struct sk_buff *skb; |
669 | 663 | ||
670 | BT_DBG("session %p", session); | 664 | BT_DBG("session %p", session); |
671 | 665 | ||
672 | while ((skb = skb_dequeue(&session->ctrl_transmit))) { | 666 | while ((skb = skb_dequeue(&session->intr_transmit))) { |
673 | if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { | 667 | if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { |
674 | skb_queue_head(&session->ctrl_transmit, skb); | 668 | skb_queue_head(&session->intr_transmit, skb); |
675 | break; | 669 | break; |
676 | } | 670 | } |
677 | 671 | ||
678 | hidp_set_timer(session); | 672 | hidp_set_timer(session); |
679 | kfree_skb(skb); | 673 | kfree_skb(skb); |
680 | } | 674 | } |
675 | } | ||
681 | 676 | ||
682 | while ((skb = skb_dequeue(&session->intr_transmit))) { | 677 | static void hidp_process_ctrl_transmit(struct hidp_session *session) |
683 | if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { | 678 | { |
684 | skb_queue_head(&session->intr_transmit, skb); | 679 | struct sk_buff *skb; |
680 | |||
681 | BT_DBG("session %p", session); | ||
682 | |||
683 | while ((skb = skb_dequeue(&session->ctrl_transmit))) { | ||
684 | if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { | ||
685 | skb_queue_head(&session->ctrl_transmit, skb); | ||
685 | break; | 686 | break; |
686 | } | 687 | } |
687 | 688 | ||
@@ -700,6 +701,7 @@ static int hidp_session(void *arg) | |||
700 | 701 | ||
701 | BT_DBG("session %p", session); | 702 | BT_DBG("session %p", session); |
702 | 703 | ||
704 | __module_get(THIS_MODULE); | ||
703 | set_user_nice(current, -15); | 705 | set_user_nice(current, -15); |
704 | 706 | ||
705 | init_waitqueue_entry(&ctrl_wait, current); | 707 | init_waitqueue_entry(&ctrl_wait, current); |
@@ -714,23 +716,25 @@ static int hidp_session(void *arg) | |||
714 | intr_sk->sk_state != BT_CONNECTED) | 716 | intr_sk->sk_state != BT_CONNECTED) |
715 | break; | 717 | break; |
716 | 718 | ||
717 | while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { | 719 | while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { |
718 | skb_orphan(skb); | 720 | skb_orphan(skb); |
719 | if (!skb_linearize(skb)) | 721 | if (!skb_linearize(skb)) |
720 | hidp_recv_ctrl_frame(session, skb); | 722 | hidp_recv_intr_frame(session, skb); |
721 | else | 723 | else |
722 | kfree_skb(skb); | 724 | kfree_skb(skb); |
723 | } | 725 | } |
724 | 726 | ||
725 | while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { | 727 | hidp_process_intr_transmit(session); |
728 | |||
729 | while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { | ||
726 | skb_orphan(skb); | 730 | skb_orphan(skb); |
727 | if (!skb_linearize(skb)) | 731 | if (!skb_linearize(skb)) |
728 | hidp_recv_intr_frame(session, skb); | 732 | hidp_recv_ctrl_frame(session, skb); |
729 | else | 733 | else |
730 | kfree_skb(skb); | 734 | kfree_skb(skb); |
731 | } | 735 | } |
732 | 736 | ||
733 | hidp_process_transmit(session); | 737 | hidp_process_ctrl_transmit(session); |
734 | 738 | ||
735 | schedule(); | 739 | schedule(); |
736 | set_current_state(TASK_INTERRUPTIBLE); | 740 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -739,6 +743,10 @@ static int hidp_session(void *arg) | |||
739 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); | 743 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); |
740 | remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); | 744 | remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); |
741 | 745 | ||
746 | clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); | ||
747 | clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); | ||
748 | wake_up_interruptible(&session->report_queue); | ||
749 | |||
742 | down_write(&hidp_session_sem); | 750 | down_write(&hidp_session_sem); |
743 | 751 | ||
744 | hidp_del_timer(session); | 752 | hidp_del_timer(session); |
@@ -772,34 +780,37 @@ static int hidp_session(void *arg) | |||
772 | 780 | ||
773 | kfree(session->rd_data); | 781 | kfree(session->rd_data); |
774 | kfree(session); | 782 | kfree(session); |
783 | module_put_and_exit(0); | ||
775 | return 0; | 784 | return 0; |
776 | } | 785 | } |
777 | 786 | ||
778 | static struct device *hidp_get_device(struct hidp_session *session) | 787 | static struct hci_conn *hidp_get_connection(struct hidp_session *session) |
779 | { | 788 | { |
780 | bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; | 789 | bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; |
781 | bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; | 790 | bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; |
782 | struct device *device = NULL; | 791 | struct hci_conn *conn; |
783 | struct hci_dev *hdev; | 792 | struct hci_dev *hdev; |
784 | 793 | ||
785 | hdev = hci_get_route(dst, src); | 794 | hdev = hci_get_route(dst, src); |
786 | if (!hdev) | 795 | if (!hdev) |
787 | return NULL; | 796 | return NULL; |
788 | 797 | ||
789 | session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); | 798 | hci_dev_lock(hdev); |
790 | if (session->conn) | 799 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); |
791 | device = &session->conn->dev; | 800 | if (conn) |
801 | hci_conn_hold_device(conn); | ||
802 | hci_dev_unlock(hdev); | ||
792 | 803 | ||
793 | hci_dev_put(hdev); | 804 | hci_dev_put(hdev); |
794 | 805 | ||
795 | return device; | 806 | return conn; |
796 | } | 807 | } |
797 | 808 | ||
798 | static int hidp_setup_input(struct hidp_session *session, | 809 | static int hidp_setup_input(struct hidp_session *session, |
799 | struct hidp_connadd_req *req) | 810 | struct hidp_connadd_req *req) |
800 | { | 811 | { |
801 | struct input_dev *input; | 812 | struct input_dev *input; |
802 | int err, i; | 813 | int i; |
803 | 814 | ||
804 | input = input_allocate_device(); | 815 | input = input_allocate_device(); |
805 | if (!input) | 816 | if (!input) |
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session, | |||
842 | input->relbit[0] |= BIT_MASK(REL_WHEEL); | 853 | input->relbit[0] |= BIT_MASK(REL_WHEEL); |
843 | } | 854 | } |
844 | 855 | ||
845 | input->dev.parent = hidp_get_device(session); | 856 | input->dev.parent = &session->conn->dev; |
846 | 857 | ||
847 | input->event = hidp_input_event; | 858 | input->event = hidp_input_event; |
848 | 859 | ||
849 | err = input_register_device(input); | ||
850 | if (err < 0) { | ||
851 | input_free_device(input); | ||
852 | session->input = NULL; | ||
853 | return err; | ||
854 | } | ||
855 | |||
856 | return 0; | 860 | return 0; |
857 | } | 861 | } |
858 | 862 | ||
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session, | |||
949 | strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); | 953 | strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); |
950 | strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); | 954 | strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); |
951 | 955 | ||
952 | hid->dev.parent = hidp_get_device(session); | 956 | hid->dev.parent = &session->conn->dev; |
953 | hid->ll_driver = &hidp_hid_driver; | 957 | hid->ll_driver = &hidp_hid_driver; |
954 | 958 | ||
955 | hid->hid_get_raw_report = hidp_get_raw_report; | 959 | hid->hid_get_raw_report = hidp_get_raw_report; |
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
976 | bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) | 980 | bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) |
977 | return -ENOTUNIQ; | 981 | return -ENOTUNIQ; |
978 | 982 | ||
979 | session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); | ||
980 | if (!session) | ||
981 | return -ENOMEM; | ||
982 | |||
983 | BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); | 983 | BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); |
984 | 984 | ||
985 | down_write(&hidp_session_sem); | 985 | down_write(&hidp_session_sem); |
986 | 986 | ||
987 | s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); | 987 | s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); |
988 | if (s && s->state == BT_CONNECTED) { | 988 | if (s && s->state == BT_CONNECTED) { |
989 | err = -EEXIST; | 989 | up_write(&hidp_session_sem); |
990 | goto failed; | 990 | return -EEXIST; |
991 | } | ||
992 | |||
993 | session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); | ||
994 | if (!session) { | ||
995 | up_write(&hidp_session_sem); | ||
996 | return -ENOMEM; | ||
991 | } | 997 | } |
992 | 998 | ||
993 | bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); | 999 | bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); |
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1003 | session->intr_sock = intr_sock; | 1009 | session->intr_sock = intr_sock; |
1004 | session->state = BT_CONNECTED; | 1010 | session->state = BT_CONNECTED; |
1005 | 1011 | ||
1012 | session->conn = hidp_get_connection(session); | ||
1013 | if (!session->conn) { | ||
1014 | err = -ENOTCONN; | ||
1015 | goto failed; | ||
1016 | } | ||
1017 | |||
1006 | setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); | 1018 | setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); |
1007 | 1019 | ||
1008 | skb_queue_head_init(&session->ctrl_transmit); | 1020 | skb_queue_head_init(&session->ctrl_transmit); |
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1015 | session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); | 1027 | session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); |
1016 | session->idle_to = req->idle_to; | 1028 | session->idle_to = req->idle_to; |
1017 | 1029 | ||
1030 | __hidp_link_session(session); | ||
1031 | |||
1018 | if (req->rd_size > 0) { | 1032 | if (req->rd_size > 0) { |
1019 | err = hidp_setup_hid(session, req); | 1033 | err = hidp_setup_hid(session, req); |
1020 | if (err && err != -ENODEV) | 1034 | if (err) |
1021 | goto purge; | 1035 | goto purge; |
1022 | } | 1036 | } |
1023 | 1037 | ||
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1027 | goto purge; | 1041 | goto purge; |
1028 | } | 1042 | } |
1029 | 1043 | ||
1030 | __hidp_link_session(session); | ||
1031 | |||
1032 | hidp_set_timer(session); | 1044 | hidp_set_timer(session); |
1033 | 1045 | ||
1034 | if (session->hid) { | 1046 | if (session->hid) { |
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1054 | !session->waiting_for_startup); | 1066 | !session->waiting_for_startup); |
1055 | } | 1067 | } |
1056 | 1068 | ||
1057 | err = hid_add_device(session->hid); | 1069 | if (session->hid) |
1070 | err = hid_add_device(session->hid); | ||
1071 | else | ||
1072 | err = input_register_device(session->input); | ||
1073 | |||
1058 | if (err < 0) { | 1074 | if (err < 0) { |
1059 | atomic_inc(&session->terminate); | 1075 | atomic_inc(&session->terminate); |
1060 | wake_up_process(session->task); | 1076 | wake_up_process(session->task); |
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1077 | unlink: | 1093 | unlink: |
1078 | hidp_del_timer(session); | 1094 | hidp_del_timer(session); |
1079 | 1095 | ||
1080 | __hidp_unlink_session(session); | ||
1081 | |||
1082 | if (session->input) { | 1096 | if (session->input) { |
1083 | input_unregister_device(session->input); | 1097 | input_unregister_device(session->input); |
1084 | session->input = NULL; | 1098 | session->input = NULL; |
@@ -1093,6 +1107,8 @@ unlink: | |||
1093 | session->rd_data = NULL; | 1107 | session->rd_data = NULL; |
1094 | 1108 | ||
1095 | purge: | 1109 | purge: |
1110 | __hidp_unlink_session(session); | ||
1111 | |||
1096 | skb_queue_purge(&session->ctrl_transmit); | 1112 | skb_queue_purge(&session->ctrl_transmit); |
1097 | skb_queue_purge(&session->intr_transmit); | 1113 | skb_queue_purge(&session->intr_transmit); |
1098 | 1114 | ||
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req) | |||
1134 | 1150 | ||
1135 | int hidp_get_connlist(struct hidp_connlist_req *req) | 1151 | int hidp_get_connlist(struct hidp_connlist_req *req) |
1136 | { | 1152 | { |
1137 | struct list_head *p; | 1153 | struct hidp_session *session; |
1138 | int err = 0, n = 0; | 1154 | int err = 0, n = 0; |
1139 | 1155 | ||
1140 | BT_DBG(""); | 1156 | BT_DBG(""); |
1141 | 1157 | ||
1142 | down_read(&hidp_session_sem); | 1158 | down_read(&hidp_session_sem); |
1143 | 1159 | ||
1144 | list_for_each(p, &hidp_session_list) { | 1160 | list_for_each_entry(session, &hidp_session_list, list) { |
1145 | struct hidp_session *session; | ||
1146 | struct hidp_conninfo ci; | 1161 | struct hidp_conninfo ci; |
1147 | 1162 | ||
1148 | session = list_entry(p, struct hidp_session, list); | ||
1149 | |||
1150 | __hidp_copy_session(session, &ci); | 1163 | __hidp_copy_session(session, &ci); |
1151 | 1164 | ||
1152 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { | 1165 | if (copy_to_user(req->ci, &ci, sizeof(ci))) { |
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 17b5b1cd9657..aa78d8c4b93b 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -3,6 +3,7 @@ | |||
3 | Copyright (C) 2000-2001 Qualcomm Incorporated | 3 | Copyright (C) 2000-2001 Qualcomm Incorporated |
4 | Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> | 4 | Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> |
5 | Copyright (C) 2010 Google Inc. | 5 | Copyright (C) 2010 Google Inc. |
6 | Copyright (C) 2011 ProFUSION Embedded Systems | ||
6 | 7 | ||
7 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> | 8 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
8 | 9 | ||
@@ -56,10 +57,10 @@ | |||
56 | #include <net/bluetooth/l2cap.h> | 57 | #include <net/bluetooth/l2cap.h> |
57 | #include <net/bluetooth/smp.h> | 58 | #include <net/bluetooth/smp.h> |
58 | 59 | ||
59 | int disable_ertm; | 60 | bool disable_ertm; |
60 | 61 | ||
61 | static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; | 62 | static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; |
62 | static u8 l2cap_fixed_chan[8] = { 0x02, }; | 63 | static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, }; |
63 | 64 | ||
64 | static LIST_HEAD(chan_list); | 65 | static LIST_HEAD(chan_list); |
65 | static DEFINE_RWLOCK(chan_list_lock); | 66 | static DEFINE_RWLOCK(chan_list_lock); |
@@ -76,38 +77,38 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); | |||
76 | 77 | ||
77 | /* ---- L2CAP channels ---- */ | 78 | /* ---- L2CAP channels ---- */ |
78 | 79 | ||
79 | static inline void chan_hold(struct l2cap_chan *c) | ||
80 | { | ||
81 | atomic_inc(&c->refcnt); | ||
82 | } | ||
83 | |||
84 | static inline void chan_put(struct l2cap_chan *c) | ||
85 | { | ||
86 | if (atomic_dec_and_test(&c->refcnt)) | ||
87 | kfree(c); | ||
88 | } | ||
89 | |||
90 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) | 80 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) |
91 | { | 81 | { |
92 | struct l2cap_chan *c; | 82 | struct l2cap_chan *c, *r = NULL; |
93 | 83 | ||
94 | list_for_each_entry(c, &conn->chan_l, list) { | 84 | rcu_read_lock(); |
95 | if (c->dcid == cid) | 85 | |
96 | return c; | 86 | list_for_each_entry_rcu(c, &conn->chan_l, list) { |
87 | if (c->dcid == cid) { | ||
88 | r = c; | ||
89 | break; | ||
90 | } | ||
97 | } | 91 | } |
98 | return NULL; | ||
99 | 92 | ||
93 | rcu_read_unlock(); | ||
94 | return r; | ||
100 | } | 95 | } |
101 | 96 | ||
102 | static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) | 97 | static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) |
103 | { | 98 | { |
104 | struct l2cap_chan *c; | 99 | struct l2cap_chan *c, *r = NULL; |
105 | 100 | ||
106 | list_for_each_entry(c, &conn->chan_l, list) { | 101 | rcu_read_lock(); |
107 | if (c->scid == cid) | 102 | |
108 | return c; | 103 | list_for_each_entry_rcu(c, &conn->chan_l, list) { |
104 | if (c->scid == cid) { | ||
105 | r = c; | ||
106 | break; | ||
107 | } | ||
109 | } | 108 | } |
110 | return NULL; | 109 | |
110 | rcu_read_unlock(); | ||
111 | return r; | ||
111 | } | 112 | } |
112 | 113 | ||
113 | /* Find channel with given SCID. | 114 | /* Find channel with given SCID. |
@@ -116,34 +117,36 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci | |||
116 | { | 117 | { |
117 | struct l2cap_chan *c; | 118 | struct l2cap_chan *c; |
118 | 119 | ||
119 | read_lock(&conn->chan_lock); | ||
120 | c = __l2cap_get_chan_by_scid(conn, cid); | 120 | c = __l2cap_get_chan_by_scid(conn, cid); |
121 | if (c) | 121 | if (c) |
122 | bh_lock_sock(c->sk); | 122 | lock_sock(c->sk); |
123 | read_unlock(&conn->chan_lock); | ||
124 | return c; | 123 | return c; |
125 | } | 124 | } |
126 | 125 | ||
127 | static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) | 126 | static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) |
128 | { | 127 | { |
129 | struct l2cap_chan *c; | 128 | struct l2cap_chan *c, *r = NULL; |
130 | 129 | ||
131 | list_for_each_entry(c, &conn->chan_l, list) { | 130 | rcu_read_lock(); |
132 | if (c->ident == ident) | 131 | |
133 | return c; | 132 | list_for_each_entry_rcu(c, &conn->chan_l, list) { |
133 | if (c->ident == ident) { | ||
134 | r = c; | ||
135 | break; | ||
136 | } | ||
134 | } | 137 | } |
135 | return NULL; | 138 | |
139 | rcu_read_unlock(); | ||
140 | return r; | ||
136 | } | 141 | } |
137 | 142 | ||
138 | static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) | 143 | static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) |
139 | { | 144 | { |
140 | struct l2cap_chan *c; | 145 | struct l2cap_chan *c; |
141 | 146 | ||
142 | read_lock(&conn->chan_lock); | ||
143 | c = __l2cap_get_chan_by_ident(conn, ident); | 147 | c = __l2cap_get_chan_by_ident(conn, ident); |
144 | if (c) | 148 | if (c) |
145 | bh_lock_sock(c->sk); | 149 | lock_sock(c->sk); |
146 | read_unlock(&conn->chan_lock); | ||
147 | return c; | 150 | return c; |
148 | } | 151 | } |
149 | 152 | ||
@@ -153,12 +156,9 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) | |||
153 | 156 | ||
154 | list_for_each_entry(c, &chan_list, global_l) { | 157 | list_for_each_entry(c, &chan_list, global_l) { |
155 | if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) | 158 | if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src)) |
156 | goto found; | 159 | return c; |
157 | } | 160 | } |
158 | 161 | return NULL; | |
159 | c = NULL; | ||
160 | found: | ||
161 | return c; | ||
162 | } | 162 | } |
163 | 163 | ||
164 | int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) | 164 | int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) |
@@ -217,45 +217,51 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn) | |||
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
219 | 219 | ||
220 | static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) | 220 | static char *state_to_string(int state) |
221 | { | 221 | { |
222 | BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout); | 222 | switch(state) { |
223 | 223 | case BT_CONNECTED: | |
224 | if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) | 224 | return "BT_CONNECTED"; |
225 | chan_hold(chan); | 225 | case BT_OPEN: |
226 | } | 226 | return "BT_OPEN"; |
227 | 227 | case BT_BOUND: | |
228 | static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer) | 228 | return "BT_BOUND"; |
229 | { | 229 | case BT_LISTEN: |
230 | BT_DBG("chan %p state %d", chan, chan->state); | 230 | return "BT_LISTEN"; |
231 | case BT_CONNECT: | ||
232 | return "BT_CONNECT"; | ||
233 | case BT_CONNECT2: | ||
234 | return "BT_CONNECT2"; | ||
235 | case BT_CONFIG: | ||
236 | return "BT_CONFIG"; | ||
237 | case BT_DISCONN: | ||
238 | return "BT_DISCONN"; | ||
239 | case BT_CLOSED: | ||
240 | return "BT_CLOSED"; | ||
241 | } | ||
231 | 242 | ||
232 | if (timer_pending(timer) && del_timer(timer)) | 243 | return "invalid state"; |
233 | chan_put(chan); | ||
234 | } | 244 | } |
235 | 245 | ||
236 | static void l2cap_state_change(struct l2cap_chan *chan, int state) | 246 | static void l2cap_state_change(struct l2cap_chan *chan, int state) |
237 | { | 247 | { |
248 | BT_DBG("%p %s -> %s", chan, state_to_string(chan->state), | ||
249 | state_to_string(state)); | ||
250 | |||
238 | chan->state = state; | 251 | chan->state = state; |
239 | chan->ops->state_change(chan->data, state); | 252 | chan->ops->state_change(chan->data, state); |
240 | } | 253 | } |
241 | 254 | ||
242 | static void l2cap_chan_timeout(unsigned long arg) | 255 | static void l2cap_chan_timeout(struct work_struct *work) |
243 | { | 256 | { |
244 | struct l2cap_chan *chan = (struct l2cap_chan *) arg; | 257 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
258 | chan_timer.work); | ||
245 | struct sock *sk = chan->sk; | 259 | struct sock *sk = chan->sk; |
246 | int reason; | 260 | int reason; |
247 | 261 | ||
248 | BT_DBG("chan %p state %d", chan, chan->state); | 262 | BT_DBG("chan %p state %d", chan, chan->state); |
249 | 263 | ||
250 | bh_lock_sock(sk); | 264 | lock_sock(sk); |
251 | |||
252 | if (sock_owned_by_user(sk)) { | ||
253 | /* sk is owned by user. Try again later */ | ||
254 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); | ||
255 | bh_unlock_sock(sk); | ||
256 | chan_put(chan); | ||
257 | return; | ||
258 | } | ||
259 | 265 | ||
260 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) | 266 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) |
261 | reason = ECONNREFUSED; | 267 | reason = ECONNREFUSED; |
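l2cap_chan_timeout() changes from a timer_list callback in softirq context to a delayed_work handler in process context, recovering its channel from the work pointer with container_of(). That is also why the old sock_owned_by_user() retry path disappears: lock_sock() simply sleeps until the socket owner lets go. A minimal sketch of the conversion, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_chan {
	struct delayed_work chan_timer;
	/* ... */
};

static void my_chan_timeout(struct work_struct *work)
{
	/* 'work' is &chan->chan_timer.work, so recover the enclosing channel */
	struct my_chan *chan = container_of(work, struct my_chan, chan_timer.work);

	/* process context: sleeping primitives such as lock_sock() are allowed */
	(void)chan;
}

static void my_chan_init(struct my_chan *chan)
{
	INIT_DELAYED_WORK(&chan->chan_timer, my_chan_timeout);
}

/* arm:    schedule_delayed_work(&chan->chan_timer, msecs_to_jiffies(timeout_ms)); */
/* disarm: cancel_delayed_work(&chan->chan_timer); */
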
@@ -267,10 +273,10 @@ static void l2cap_chan_timeout(unsigned long arg) | |||
267 | 273 | ||
268 | l2cap_chan_close(chan, reason); | 274 | l2cap_chan_close(chan, reason); |
269 | 275 | ||
270 | bh_unlock_sock(sk); | 276 | release_sock(sk); |
271 | 277 | ||
272 | chan->ops->close(chan->data); | 278 | chan->ops->close(chan->data); |
273 | chan_put(chan); | 279 | l2cap_chan_put(chan); |
274 | } | 280 | } |
275 | 281 | ||
276 | struct l2cap_chan *l2cap_chan_create(struct sock *sk) | 282 | struct l2cap_chan *l2cap_chan_create(struct sock *sk) |
@@ -287,12 +293,14 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk) | |||
287 | list_add(&chan->global_l, &chan_list); | 293 | list_add(&chan->global_l, &chan_list); |
288 | write_unlock_bh(&chan_list_lock); | 294 | write_unlock_bh(&chan_list_lock); |
289 | 295 | ||
290 | setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan); | 296 | INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); |
291 | 297 | ||
292 | chan->state = BT_OPEN; | 298 | chan->state = BT_OPEN; |
293 | 299 | ||
294 | atomic_set(&chan->refcnt, 1); | 300 | atomic_set(&chan->refcnt, 1); |
295 | 301 | ||
302 | BT_DBG("sk %p chan %p", sk, chan); | ||
303 | |||
296 | return chan; | 304 | return chan; |
297 | } | 305 | } |
298 | 306 | ||
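l2cap_chan_create() still starts the channel with a reference count of one; the hold/put helpers, now spelled l2cap_chan_hold()/l2cap_chan_put() since other files use them, are assumed here to be a plain atomic count that frees the channel when the last reference drops, roughly:

#include <linux/atomic.h>
#include <linux/slab.h>

struct chan_sketch {
	atomic_t refcnt;
	/* ... channel state ... */
};

/* sketch only; the real helpers belong to struct l2cap_chan in the Bluetooth core */
static inline void chan_sketch_hold(struct chan_sketch *c)
{
	atomic_inc(&c->refcnt);
}

static inline void chan_sketch_put(struct chan_sketch *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
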
@@ -302,15 +310,15 @@ void l2cap_chan_destroy(struct l2cap_chan *chan) | |||
302 | list_del(&chan->global_l); | 310 | list_del(&chan->global_l); |
303 | write_unlock_bh(&chan_list_lock); | 311 | write_unlock_bh(&chan_list_lock); |
304 | 312 | ||
305 | chan_put(chan); | 313 | l2cap_chan_put(chan); |
306 | } | 314 | } |
307 | 315 | ||
308 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | 316 | static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) |
309 | { | 317 | { |
310 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, | 318 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, |
311 | chan->psm, chan->dcid); | 319 | chan->psm, chan->dcid); |
312 | 320 | ||
313 | conn->disc_reason = 0x13; | 321 | conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; |
314 | 322 | ||
315 | chan->conn = conn; | 323 | chan->conn = conn; |
316 | 324 | ||
@@ -337,9 +345,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | |||
337 | chan->omtu = L2CAP_DEFAULT_MTU; | 345 | chan->omtu = L2CAP_DEFAULT_MTU; |
338 | } | 346 | } |
339 | 347 | ||
340 | chan_hold(chan); | 348 | chan->local_id = L2CAP_BESTEFFORT_ID; |
349 | chan->local_stype = L2CAP_SERV_BESTEFFORT; | ||
350 | chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; | ||
351 | chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; | ||
352 | chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; | ||
353 | chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; | ||
354 | |||
355 | l2cap_chan_hold(chan); | ||
341 | 356 | ||
342 | list_add(&chan->list, &conn->chan_l); | 357 | list_add_rcu(&chan->list, &conn->chan_l); |
343 | } | 358 | } |
344 | 359 | ||
345 | /* Delete channel. | 360 | /* Delete channel. |
@@ -356,10 +371,10 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err) | |||
356 | 371 | ||
357 | if (conn) { | 372 | if (conn) { |
358 | /* Delete from channel list */ | 373 | /* Delete from channel list */ |
359 | write_lock_bh(&conn->chan_lock); | 374 | list_del_rcu(&chan->list); |
360 | list_del(&chan->list); | 375 | synchronize_rcu(); |
361 | write_unlock_bh(&conn->chan_lock); | 376 | |
362 | chan_put(chan); | 377 | l2cap_chan_put(chan); |
363 | 378 | ||
364 | chan->conn = NULL; | 379 | chan->conn = NULL; |
365 | hci_conn_put(conn->hcon); | 380 | hci_conn_put(conn->hcon); |
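Channel list membership now uses the RCU list primitives: l2cap_chan_add() publishes the channel with list_add_rcu(), and l2cap_chan_del() unlinks it with list_del_rcu() and waits out a grace period with synchronize_rcu() before dropping the list's reference, so a reader still inside rcu_read_lock() can never see a freed channel. The pairing, sketched with generic names and hypothetical hold/put helpers:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct node {
	struct list_head list;
};

void node_hold(struct node *n);	/* hypothetical refcount helpers */
void node_put(struct node *n);

static void node_publish(struct list_head *head, struct node *n)
{
	node_hold(n);			/* the list owns a reference */
	list_add_rcu(&n->list, head);
}

static void node_retire(struct node *n)
{
	list_del_rcu(&n->list);
	synchronize_rcu();		/* wait until no RCU reader can still see 'n' */
	node_put(n);			/* may free it now */
}
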
@@ -508,7 +523,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) | |||
508 | } | 523 | } |
509 | 524 | ||
510 | /* Service level security */ | 525 | /* Service level security */ |
511 | static inline int l2cap_check_security(struct l2cap_chan *chan) | 526 | int l2cap_chan_check_security(struct l2cap_chan *chan) |
512 | { | 527 | { |
513 | struct l2cap_conn *conn = chan->conn; | 528 | struct l2cap_conn *conn = chan->conn; |
514 | __u8 auth_type; | 529 | __u8 auth_type; |
@@ -556,34 +571,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, | |||
556 | flags = ACL_START; | 571 | flags = ACL_START; |
557 | 572 | ||
558 | bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; | 573 | bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; |
574 | skb->priority = HCI_PRIO_MAX; | ||
575 | |||
576 | hci_send_acl(conn->hchan, skb, flags); | ||
577 | } | ||
559 | 578 | ||
560 | hci_send_acl(conn->hcon, skb, flags); | 579 | static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) |
580 | { | ||
581 | struct hci_conn *hcon = chan->conn->hcon; | ||
582 | u16 flags; | ||
583 | |||
584 | BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, | ||
585 | skb->priority); | ||
586 | |||
587 | if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && | ||
588 | lmp_no_flush_capable(hcon->hdev)) | ||
589 | flags = ACL_START_NO_FLUSH; | ||
590 | else | ||
591 | flags = ACL_START; | ||
592 | |||
593 | bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); | ||
594 | hci_send_acl(chan->conn->hchan, skb, flags); | ||
561 | } | 595 | } |
562 | 596 | ||
563 | static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) | 597 | static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) |
564 | { | 598 | { |
565 | struct sk_buff *skb; | 599 | struct sk_buff *skb; |
566 | struct l2cap_hdr *lh; | 600 | struct l2cap_hdr *lh; |
567 | struct l2cap_conn *conn = chan->conn; | 601 | struct l2cap_conn *conn = chan->conn; |
568 | int count, hlen = L2CAP_HDR_SIZE + 2; | 602 | int count, hlen; |
569 | u8 flags; | ||
570 | 603 | ||
571 | if (chan->state != BT_CONNECTED) | 604 | if (chan->state != BT_CONNECTED) |
572 | return; | 605 | return; |
573 | 606 | ||
607 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
608 | hlen = L2CAP_EXT_HDR_SIZE; | ||
609 | else | ||
610 | hlen = L2CAP_ENH_HDR_SIZE; | ||
611 | |||
574 | if (chan->fcs == L2CAP_FCS_CRC16) | 612 | if (chan->fcs == L2CAP_FCS_CRC16) |
575 | hlen += 2; | 613 | hlen += L2CAP_FCS_SIZE; |
576 | 614 | ||
577 | BT_DBG("chan %p, control 0x%2.2x", chan, control); | 615 | BT_DBG("chan %p, control 0x%8.8x", chan, control); |
578 | 616 | ||
579 | count = min_t(unsigned int, conn->mtu, hlen); | 617 | count = min_t(unsigned int, conn->mtu, hlen); |
580 | control |= L2CAP_CTRL_FRAME_TYPE; | 618 | |
619 | control |= __set_sframe(chan); | ||
581 | 620 | ||
582 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 621 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
583 | control |= L2CAP_CTRL_FINAL; | 622 | control |= __set_ctrl_final(chan); |
584 | 623 | ||
585 | if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) | 624 | if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) |
586 | control |= L2CAP_CTRL_POLL; | 625 | control |= __set_ctrl_poll(chan); |
587 | 626 | ||
588 | skb = bt_skb_alloc(count, GFP_ATOMIC); | 627 | skb = bt_skb_alloc(count, GFP_ATOMIC); |
589 | if (!skb) | 628 | if (!skb) |
@@ -592,32 +631,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) | |||
592 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 631 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
593 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); | 632 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); |
594 | lh->cid = cpu_to_le16(chan->dcid); | 633 | lh->cid = cpu_to_le16(chan->dcid); |
595 | put_unaligned_le16(control, skb_put(skb, 2)); | 634 | |
635 | __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); | ||
596 | 636 | ||
597 | if (chan->fcs == L2CAP_FCS_CRC16) { | 637 | if (chan->fcs == L2CAP_FCS_CRC16) { |
598 | u16 fcs = crc16(0, (u8 *)lh, count - 2); | 638 | u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); |
599 | put_unaligned_le16(fcs, skb_put(skb, 2)); | 639 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); |
600 | } | 640 | } |
601 | 641 | ||
602 | if (lmp_no_flush_capable(conn->hcon->hdev)) | 642 | skb->priority = HCI_PRIO_MAX; |
603 | flags = ACL_START_NO_FLUSH; | 643 | l2cap_do_send(chan, skb); |
604 | else | ||
605 | flags = ACL_START; | ||
606 | |||
607 | bt_cb(skb)->force_active = chan->force_active; | ||
608 | |||
609 | hci_send_acl(chan->conn->hcon, skb, flags); | ||
610 | } | 644 | } |
611 | 645 | ||
612 | static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) | 646 | static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) |
613 | { | 647 | { |
614 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 648 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
615 | control |= L2CAP_SUPER_RCV_NOT_READY; | 649 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); |
616 | set_bit(CONN_RNR_SENT, &chan->conn_state); | 650 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
617 | } else | 651 | } else |
618 | control |= L2CAP_SUPER_RCV_READY; | 652 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); |
619 | 653 | ||
620 | control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 654 | control |= __set_reqseq(chan, chan->buffer_seq); |
621 | 655 | ||
622 | l2cap_send_sframe(chan, control); | 656 | l2cap_send_sframe(chan, control); |
623 | } | 657 | } |
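S-frames are now assembled through helpers (__set_ctrl_super(), __set_reqseq(), __set_ctrl_final(), __put_control(), __ctrl_size()) that hide whether the channel uses the 2-byte enhanced control field or the 4-byte extended control field selected by FLAG_EXT_CTRL. For the enhanced layout the bit positions match the constants visible in the removed code; a stand-alone example that packs a Receiver Ready control word with local constants (a sketch, not the kernel's helpers):

#include <stdint.h>
#include <stdio.h>

/* Enhanced (16-bit) ERTM control field, S-frame variant:
 * bit 0 frame type (1 = S-frame), bits 2-3 supervisory function,
 * bit 4 P, bit 7 F, bits 8-13 ReqSeq.
 */
#define CTRL_FRAME_TYPE   0x0001
#define CTRL_SUPER_SHIFT  2
#define CTRL_POLL         0x0010
#define CTRL_FINAL        0x0080
#define CTRL_REQSEQ_SHIFT 8

#define SUPER_RR  0x00		/* Receiver Ready */
#define SUPER_RNR 0x02		/* Receiver Not Ready */

static uint16_t pack_sframe(uint8_t super, uint8_t reqseq, int poll, int final)
{
	uint16_t control = CTRL_FRAME_TYPE;

	control |= (uint16_t)super << CTRL_SUPER_SHIFT;
	control |= (uint16_t)(reqseq & 0x3f) << CTRL_REQSEQ_SHIFT;
	if (poll)
		control |= CTRL_POLL;
	if (final)
		control |= CTRL_FINAL;
	return control;
}

int main(void)
{
	/* RR acknowledging everything before sequence number 5 */
	printf("RR control word: 0x%04x\n", pack_sframe(SUPER_RR, 5, 0, 0));
	return 0;
}

The extended control field spreads the same information over 32 bits with 14-bit sequence numbers, which is why the control argument throughout this file widens from u16 to u32.
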
@@ -635,7 +669,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) | |||
635 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) | 669 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) |
636 | return; | 670 | return; |
637 | 671 | ||
638 | if (l2cap_check_security(chan) && | 672 | if (l2cap_chan_check_security(chan) && |
639 | __l2cap_no_conn_pending(chan)) { | 673 | __l2cap_no_conn_pending(chan)) { |
640 | struct l2cap_conn_req req; | 674 | struct l2cap_conn_req req; |
641 | req.scid = cpu_to_le16(chan->scid); | 675 | req.scid = cpu_to_le16(chan->scid); |
@@ -654,7 +688,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) | |||
654 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | 688 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
655 | conn->info_ident = l2cap_get_ident(conn); | 689 | conn->info_ident = l2cap_get_ident(conn); |
656 | 690 | ||
657 | mod_timer(&conn->info_timer, jiffies + | 691 | schedule_delayed_work(&conn->info_timer, |
658 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | 692 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); |
659 | 693 | ||
660 | l2cap_send_cmd(conn, conn->info_ident, | 694 | l2cap_send_cmd(conn, conn->info_ident, |
@@ -706,13 +740,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c | |||
706 | /* ---- L2CAP connections ---- */ | 740 | /* ---- L2CAP connections ---- */ |
707 | static void l2cap_conn_start(struct l2cap_conn *conn) | 741 | static void l2cap_conn_start(struct l2cap_conn *conn) |
708 | { | 742 | { |
709 | struct l2cap_chan *chan, *tmp; | 743 | struct l2cap_chan *chan; |
710 | 744 | ||
711 | BT_DBG("conn %p", conn); | 745 | BT_DBG("conn %p", conn); |
712 | 746 | ||
713 | read_lock(&conn->chan_lock); | 747 | rcu_read_lock(); |
714 | 748 | ||
715 | list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { | 749 | list_for_each_entry_rcu(chan, &conn->chan_l, list) { |
716 | struct sock *sk = chan->sk; | 750 | struct sock *sk = chan->sk; |
717 | 751 | ||
718 | bh_lock_sock(sk); | 752 | bh_lock_sock(sk); |
@@ -725,7 +759,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
725 | if (chan->state == BT_CONNECT) { | 759 | if (chan->state == BT_CONNECT) { |
726 | struct l2cap_conn_req req; | 760 | struct l2cap_conn_req req; |
727 | 761 | ||
728 | if (!l2cap_check_security(chan) || | 762 | if (!l2cap_chan_check_security(chan) || |
729 | !__l2cap_no_conn_pending(chan)) { | 763 | !__l2cap_no_conn_pending(chan)) { |
730 | bh_unlock_sock(sk); | 764 | bh_unlock_sock(sk); |
731 | continue; | 765 | continue; |
@@ -736,9 +770,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
736 | &chan->conf_state)) { | 770 | &chan->conf_state)) { |
737 | /* l2cap_chan_close() calls list_del(chan) | 771 | /* l2cap_chan_close() calls list_del(chan) |
738 | * so release the lock */ | 772 | * so release the lock */ |
739 | read_unlock(&conn->chan_lock); | ||
740 | l2cap_chan_close(chan, ECONNRESET); | 773 | l2cap_chan_close(chan, ECONNRESET); |
741 | read_lock(&conn->chan_lock); | ||
742 | bh_unlock_sock(sk); | 774 | bh_unlock_sock(sk); |
743 | continue; | 775 | continue; |
744 | } | 776 | } |
@@ -758,7 +790,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
758 | rsp.scid = cpu_to_le16(chan->dcid); | 790 | rsp.scid = cpu_to_le16(chan->dcid); |
759 | rsp.dcid = cpu_to_le16(chan->scid); | 791 | rsp.dcid = cpu_to_le16(chan->scid); |
760 | 792 | ||
761 | if (l2cap_check_security(chan)) { | 793 | if (l2cap_chan_check_security(chan)) { |
762 | if (bt_sk(sk)->defer_setup) { | 794 | if (bt_sk(sk)->defer_setup) { |
763 | struct sock *parent = bt_sk(sk)->parent; | 795 | struct sock *parent = bt_sk(sk)->parent; |
764 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); | 796 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); |
@@ -794,7 +826,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
794 | bh_unlock_sock(sk); | 826 | bh_unlock_sock(sk); |
795 | } | 827 | } |
796 | 828 | ||
797 | read_unlock(&conn->chan_lock); | 829 | rcu_read_unlock(); |
798 | } | 830 | } |
799 | 831 | ||
800 | /* Find socket with cid and source bdaddr. | 832 | /* Find socket with cid and source bdaddr. |
@@ -845,7 +877,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
845 | 877 | ||
846 | parent = pchan->sk; | 878 | parent = pchan->sk; |
847 | 879 | ||
848 | bh_lock_sock(parent); | 880 | lock_sock(parent); |
849 | 881 | ||
850 | /* Check for backlog size */ | 882 | /* Check for backlog size */ |
851 | if (sk_acceptq_is_full(parent)) { | 883 | if (sk_acceptq_is_full(parent)) { |
@@ -859,8 +891,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
859 | 891 | ||
860 | sk = chan->sk; | 892 | sk = chan->sk; |
861 | 893 | ||
862 | write_lock_bh(&conn->chan_lock); | ||
863 | |||
864 | hci_conn_hold(conn->hcon); | 894 | hci_conn_hold(conn->hcon); |
865 | 895 | ||
866 | bacpy(&bt_sk(sk)->src, conn->src); | 896 | bacpy(&bt_sk(sk)->src, conn->src); |
@@ -868,17 +898,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
868 | 898 | ||
869 | bt_accept_enqueue(parent, sk); | 899 | bt_accept_enqueue(parent, sk); |
870 | 900 | ||
871 | __l2cap_chan_add(conn, chan); | 901 | l2cap_chan_add(conn, chan); |
872 | 902 | ||
873 | __set_chan_timer(chan, sk->sk_sndtimeo); | 903 | __set_chan_timer(chan, sk->sk_sndtimeo); |
874 | 904 | ||
875 | l2cap_state_change(chan, BT_CONNECTED); | 905 | l2cap_state_change(chan, BT_CONNECTED); |
876 | parent->sk_data_ready(parent, 0); | 906 | parent->sk_data_ready(parent, 0); |
877 | 907 | ||
878 | write_unlock_bh(&conn->chan_lock); | ||
879 | |||
880 | clean: | 908 | clean: |
881 | bh_unlock_sock(parent); | 909 | release_sock(parent); |
882 | } | 910 | } |
883 | 911 | ||
884 | static void l2cap_chan_ready(struct sock *sk) | 912 | static void l2cap_chan_ready(struct sock *sk) |
@@ -910,9 +938,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) | |||
910 | if (conn->hcon->out && conn->hcon->type == LE_LINK) | 938 | if (conn->hcon->out && conn->hcon->type == LE_LINK) |
911 | smp_conn_security(conn, conn->hcon->pending_sec_level); | 939 | smp_conn_security(conn, conn->hcon->pending_sec_level); |
912 | 940 | ||
913 | read_lock(&conn->chan_lock); | 941 | rcu_read_lock(); |
914 | 942 | ||
915 | list_for_each_entry(chan, &conn->chan_l, list) { | 943 | list_for_each_entry_rcu(chan, &conn->chan_l, list) { |
916 | struct sock *sk = chan->sk; | 944 | struct sock *sk = chan->sk; |
917 | 945 | ||
918 | bh_lock_sock(sk); | 946 | bh_lock_sock(sk); |
@@ -932,7 +960,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) | |||
932 | bh_unlock_sock(sk); | 960 | bh_unlock_sock(sk); |
933 | } | 961 | } |
934 | 962 | ||
935 | read_unlock(&conn->chan_lock); | 963 | rcu_read_unlock(); |
936 | } | 964 | } |
937 | 965 | ||
938 | /* Notify sockets that we cannot guaranty reliability anymore */ | 966 | /* Notify sockets that we cannot guaranty reliability anymore */ |
@@ -942,21 +970,22 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) | |||
942 | 970 | ||
943 | BT_DBG("conn %p", conn); | 971 | BT_DBG("conn %p", conn); |
944 | 972 | ||
945 | read_lock(&conn->chan_lock); | 973 | rcu_read_lock(); |
946 | 974 | ||
947 | list_for_each_entry(chan, &conn->chan_l, list) { | 975 | list_for_each_entry_rcu(chan, &conn->chan_l, list) { |
948 | struct sock *sk = chan->sk; | 976 | struct sock *sk = chan->sk; |
949 | 977 | ||
950 | if (chan->force_reliable) | 978 | if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) |
951 | sk->sk_err = err; | 979 | sk->sk_err = err; |
952 | } | 980 | } |
953 | 981 | ||
954 | read_unlock(&conn->chan_lock); | 982 | rcu_read_unlock(); |
955 | } | 983 | } |
956 | 984 | ||
957 | static void l2cap_info_timeout(unsigned long arg) | 985 | static void l2cap_info_timeout(struct work_struct *work) |
958 | { | 986 | { |
959 | struct l2cap_conn *conn = (void *) arg; | 987 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
988 | info_timer.work); | ||
960 | 989 | ||
961 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 990 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
962 | conn->info_ident = 0; | 991 | conn->info_ident = 0; |
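Per-channel booleans such as force_reliable, flushable and force_active become bit numbers in the channel's flags word (FLAG_FORCE_RELIABLE, FLAG_FLUSHABLE, FLAG_FORCE_ACTIVE, and later FLAG_EXT_CTRL and FLAG_EFS_ENABLE), manipulated with the standard atomic bitops. The idiom, in a tiny sketch:

#include <linux/bitops.h>
#include <linux/types.h>

enum {
	EXFLAG_RELIABLE,	/* bit numbers, not masks */
	EXFLAG_FLUSHABLE,
};

struct example_chan {
	unsigned long flags;
};

static void mark_reliable(struct example_chan *c)
{
	set_bit(EXFLAG_RELIABLE, &c->flags);		/* atomic read-modify-write */
}

static bool wants_reliable(struct example_chan *c)
{
	return test_bit(EXFLAG_RELIABLE, &c->flags);
}
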
@@ -980,17 +1009,19 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) | |||
980 | /* Kill channels */ | 1009 | /* Kill channels */ |
981 | list_for_each_entry_safe(chan, l, &conn->chan_l, list) { | 1010 | list_for_each_entry_safe(chan, l, &conn->chan_l, list) { |
982 | sk = chan->sk; | 1011 | sk = chan->sk; |
983 | bh_lock_sock(sk); | 1012 | lock_sock(sk); |
984 | l2cap_chan_del(chan, err); | 1013 | l2cap_chan_del(chan, err); |
985 | bh_unlock_sock(sk); | 1014 | release_sock(sk); |
986 | chan->ops->close(chan->data); | 1015 | chan->ops->close(chan->data); |
987 | } | 1016 | } |
988 | 1017 | ||
1018 | hci_chan_del(conn->hchan); | ||
1019 | |||
989 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) | 1020 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) |
990 | del_timer_sync(&conn->info_timer); | 1021 | __cancel_delayed_work(&conn->info_timer); |
991 | 1022 | ||
992 | if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { | 1023 | if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { |
993 | del_timer(&conn->security_timer); | 1024 | __cancel_delayed_work(&conn->security_timer); |
994 | smp_chan_destroy(conn); | 1025 | smp_chan_destroy(conn); |
995 | } | 1026 | } |
996 | 1027 | ||
@@ -998,9 +1029,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) | |||
998 | kfree(conn); | 1029 | kfree(conn); |
999 | } | 1030 | } |
1000 | 1031 | ||
1001 | static void security_timeout(unsigned long arg) | 1032 | static void security_timeout(struct work_struct *work) |
1002 | { | 1033 | { |
1003 | struct l2cap_conn *conn = (void *) arg; | 1034 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
1035 | security_timer.work); | ||
1004 | 1036 | ||
1005 | l2cap_conn_del(conn->hcon, ETIMEDOUT); | 1037 | l2cap_conn_del(conn->hcon, ETIMEDOUT); |
1006 | } | 1038 | } |
@@ -1008,18 +1040,26 @@ static void security_timeout(unsigned long arg) | |||
1008 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | 1040 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) |
1009 | { | 1041 | { |
1010 | struct l2cap_conn *conn = hcon->l2cap_data; | 1042 | struct l2cap_conn *conn = hcon->l2cap_data; |
1043 | struct hci_chan *hchan; | ||
1011 | 1044 | ||
1012 | if (conn || status) | 1045 | if (conn || status) |
1013 | return conn; | 1046 | return conn; |
1014 | 1047 | ||
1048 | hchan = hci_chan_create(hcon); | ||
1049 | if (!hchan) | ||
1050 | return NULL; | ||
1051 | |||
1015 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); | 1052 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); |
1016 | if (!conn) | 1053 | if (!conn) { |
1054 | hci_chan_del(hchan); | ||
1017 | return NULL; | 1055 | return NULL; |
1056 | } | ||
1018 | 1057 | ||
1019 | hcon->l2cap_data = conn; | 1058 | hcon->l2cap_data = conn; |
1020 | conn->hcon = hcon; | 1059 | conn->hcon = hcon; |
1060 | conn->hchan = hchan; | ||
1021 | 1061 | ||
1022 | BT_DBG("hcon %p conn %p", hcon, conn); | 1062 | BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); |
1023 | 1063 | ||
1024 | if (hcon->hdev->le_mtu && hcon->type == LE_LINK) | 1064 | if (hcon->hdev->le_mtu && hcon->type == LE_LINK) |
1025 | conn->mtu = hcon->hdev->le_mtu; | 1065 | conn->mtu = hcon->hdev->le_mtu; |
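l2cap_conn_add() now creates an hci_chan for the ACL link before allocating the L2CAP connection and deletes it again if the kzalloc() fails; on the success path the matching hci_chan_del() happens later in l2cap_conn_del(). The shape is the usual acquire-then-unwind sequence, sketched here with hypothetical resource helpers:

#include <linux/slab.h>

struct res { int dummy; };
struct wrapper { struct res *r; };

struct res *acquire_res(void);		/* hypothetical, stands in for hci_chan_create() */
void release_res(struct res *r);	/* hypothetical, stands in for hci_chan_del() */

static struct wrapper *wrapper_create(void)
{
	struct res *r = acquire_res();
	struct wrapper *w;

	if (!r)
		return NULL;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);
	if (!w) {
		release_res(r);		/* unwind the step that already succeeded */
		return NULL;
	}

	w->r = r;			/* success: the wrapper now owns the resource */
	return w;
}
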
@@ -1032,29 +1072,19 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
1032 | conn->feat_mask = 0; | 1072 | conn->feat_mask = 0; |
1033 | 1073 | ||
1034 | spin_lock_init(&conn->lock); | 1074 | spin_lock_init(&conn->lock); |
1035 | rwlock_init(&conn->chan_lock); | ||
1036 | 1075 | ||
1037 | INIT_LIST_HEAD(&conn->chan_l); | 1076 | INIT_LIST_HEAD(&conn->chan_l); |
1038 | 1077 | ||
1039 | if (hcon->type == LE_LINK) | 1078 | if (hcon->type == LE_LINK) |
1040 | setup_timer(&conn->security_timer, security_timeout, | 1079 | INIT_DELAYED_WORK(&conn->security_timer, security_timeout); |
1041 | (unsigned long) conn); | ||
1042 | else | 1080 | else |
1043 | setup_timer(&conn->info_timer, l2cap_info_timeout, | 1081 | INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); |
1044 | (unsigned long) conn); | ||
1045 | 1082 | ||
1046 | conn->disc_reason = 0x13; | 1083 | conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; |
1047 | 1084 | ||
1048 | return conn; | 1085 | return conn; |
1049 | } | 1086 | } |
1050 | 1087 | ||
1051 | static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | ||
1052 | { | ||
1053 | write_lock_bh(&conn->chan_lock); | ||
1054 | __l2cap_chan_add(conn, chan); | ||
1055 | write_unlock_bh(&conn->chan_lock); | ||
1056 | } | ||
1057 | |||
1058 | /* ---- Socket interface ---- */ | 1088 | /* ---- Socket interface ---- */ |
1059 | 1089 | ||
1060 | /* Find socket with psm and source bdaddr. | 1090 | /* Find socket with psm and source bdaddr. |
@@ -1090,11 +1120,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr | |||
1090 | return c1; | 1120 | return c1; |
1091 | } | 1121 | } |
1092 | 1122 | ||
1093 | int l2cap_chan_connect(struct l2cap_chan *chan) | 1123 | inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) |
1094 | { | 1124 | { |
1095 | struct sock *sk = chan->sk; | 1125 | struct sock *sk = chan->sk; |
1096 | bdaddr_t *src = &bt_sk(sk)->src; | 1126 | bdaddr_t *src = &bt_sk(sk)->src; |
1097 | bdaddr_t *dst = &bt_sk(sk)->dst; | ||
1098 | struct l2cap_conn *conn; | 1127 | struct l2cap_conn *conn; |
1099 | struct hci_conn *hcon; | 1128 | struct hci_conn *hcon; |
1100 | struct hci_dev *hdev; | 1129 | struct hci_dev *hdev; |
@@ -1108,7 +1137,62 @@ int l2cap_chan_connect(struct l2cap_chan *chan) | |||
1108 | if (!hdev) | 1137 | if (!hdev) |
1109 | return -EHOSTUNREACH; | 1138 | return -EHOSTUNREACH; |
1110 | 1139 | ||
1111 | hci_dev_lock_bh(hdev); | 1140 | hci_dev_lock(hdev); |
1141 | |||
1142 | lock_sock(sk); | ||
1143 | |||
1144 | /* PSM must be odd and lsb of upper byte must be 0 */ | ||
1145 | if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && | ||
1146 | chan->chan_type != L2CAP_CHAN_RAW) { | ||
1147 | err = -EINVAL; | ||
1148 | goto done; | ||
1149 | } | ||
1150 | |||
1151 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) { | ||
1152 | err = -EINVAL; | ||
1153 | goto done; | ||
1154 | } | ||
1155 | |||
1156 | switch (chan->mode) { | ||
1157 | case L2CAP_MODE_BASIC: | ||
1158 | break; | ||
1159 | case L2CAP_MODE_ERTM: | ||
1160 | case L2CAP_MODE_STREAMING: | ||
1161 | if (!disable_ertm) | ||
1162 | break; | ||
1163 | /* fall through */ | ||
1164 | default: | ||
1165 | err = -ENOTSUPP; | ||
1166 | goto done; | ||
1167 | } | ||
1168 | |||
1169 | switch (sk->sk_state) { | ||
1170 | case BT_CONNECT: | ||
1171 | case BT_CONNECT2: | ||
1172 | case BT_CONFIG: | ||
1173 | /* Already connecting */ | ||
1174 | err = 0; | ||
1175 | goto done; | ||
1176 | |||
1177 | case BT_CONNECTED: | ||
1178 | /* Already connected */ | ||
1179 | err = -EISCONN; | ||
1180 | goto done; | ||
1181 | |||
1182 | case BT_OPEN: | ||
1183 | case BT_BOUND: | ||
1184 | /* Can connect */ | ||
1185 | break; | ||
1186 | |||
1187 | default: | ||
1188 | err = -EBADFD; | ||
1189 | goto done; | ||
1190 | } | ||
1191 | |||
1192 | /* Set destination address and psm */ | ||
1193 | bacpy(&bt_sk(sk)->dst, dst); | ||
1194 | chan->psm = psm; | ||
1195 | chan->dcid = cid; | ||
1112 | 1196 | ||
1113 | auth_type = l2cap_get_auth_type(chan); | 1197 | auth_type = l2cap_get_auth_type(chan); |
1114 | 1198 | ||
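The (__le16_to_cpu(psm) & 0x0101) != 0x0001 test, moved here from the socket layer, enforces the L2CAP rule that a PSM's least-significant octet must be odd while bit 0 of the more-significant octet stays clear. A stand-alone illustration of which values pass:

#include <stdint.h>
#include <stdio.h>

/* Valid PSM: bit 0 of the low octet set, bit 0 of the high octet clear. */
static int psm_is_valid(uint16_t psm)
{
	return (psm & 0x0101) == 0x0001;
}

int main(void)
{
	const uint16_t samples[] = { 0x0001, 0x0003, 0x0019, 0x0100, 0x1001, 0x1002 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04x -> %s\n", samples[i],
		       psm_is_valid(samples[i]) ? "valid" : "invalid");
	return 0;
}
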
@@ -1142,7 +1226,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan) | |||
1142 | if (hcon->state == BT_CONNECTED) { | 1226 | if (hcon->state == BT_CONNECTED) { |
1143 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { | 1227 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { |
1144 | __clear_chan_timer(chan); | 1228 | __clear_chan_timer(chan); |
1145 | if (l2cap_check_security(chan)) | 1229 | if (l2cap_chan_check_security(chan)) |
1146 | l2cap_state_change(chan, BT_CONNECTED); | 1230 | l2cap_state_change(chan, BT_CONNECTED); |
1147 | } else | 1231 | } else |
1148 | l2cap_do_start(chan); | 1232 | l2cap_do_start(chan); |
@@ -1151,7 +1235,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan) | |||
1151 | err = 0; | 1235 | err = 0; |
1152 | 1236 | ||
1153 | done: | 1237 | done: |
1154 | hci_dev_unlock_bh(hdev); | 1238 | hci_dev_unlock(hdev); |
1155 | hci_dev_put(hdev); | 1239 | hci_dev_put(hdev); |
1156 | return err; | 1240 | return err; |
1157 | } | 1241 | } |
@@ -1188,17 +1272,18 @@ int __l2cap_wait_ack(struct sock *sk) | |||
1188 | return err; | 1272 | return err; |
1189 | } | 1273 | } |
1190 | 1274 | ||
1191 | static void l2cap_monitor_timeout(unsigned long arg) | 1275 | static void l2cap_monitor_timeout(struct work_struct *work) |
1192 | { | 1276 | { |
1193 | struct l2cap_chan *chan = (void *) arg; | 1277 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1278 | monitor_timer.work); | ||
1194 | struct sock *sk = chan->sk; | 1279 | struct sock *sk = chan->sk; |
1195 | 1280 | ||
1196 | BT_DBG("chan %p", chan); | 1281 | BT_DBG("chan %p", chan); |
1197 | 1282 | ||
1198 | bh_lock_sock(sk); | 1283 | lock_sock(sk); |
1199 | if (chan->retry_count >= chan->remote_max_tx) { | 1284 | if (chan->retry_count >= chan->remote_max_tx) { |
1200 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); | 1285 | l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); |
1201 | bh_unlock_sock(sk); | 1286 | release_sock(sk); |
1202 | return; | 1287 | return; |
1203 | } | 1288 | } |
1204 | 1289 | ||
@@ -1206,24 +1291,25 @@ static void l2cap_monitor_timeout(unsigned long arg) | |||
1206 | __set_monitor_timer(chan); | 1291 | __set_monitor_timer(chan); |
1207 | 1292 | ||
1208 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); | 1293 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); |
1209 | bh_unlock_sock(sk); | 1294 | release_sock(sk); |
1210 | } | 1295 | } |
1211 | 1296 | ||
1212 | static void l2cap_retrans_timeout(unsigned long arg) | 1297 | static void l2cap_retrans_timeout(struct work_struct *work) |
1213 | { | 1298 | { |
1214 | struct l2cap_chan *chan = (void *) arg; | 1299 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1300 | retrans_timer.work); | ||
1215 | struct sock *sk = chan->sk; | 1301 | struct sock *sk = chan->sk; |
1216 | 1302 | ||
1217 | BT_DBG("chan %p", chan); | 1303 | BT_DBG("chan %p", chan); |
1218 | 1304 | ||
1219 | bh_lock_sock(sk); | 1305 | lock_sock(sk); |
1220 | chan->retry_count = 1; | 1306 | chan->retry_count = 1; |
1221 | __set_monitor_timer(chan); | 1307 | __set_monitor_timer(chan); |
1222 | 1308 | ||
1223 | set_bit(CONN_WAIT_F, &chan->conn_state); | 1309 | set_bit(CONN_WAIT_F, &chan->conn_state); |
1224 | 1310 | ||
1225 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); | 1311 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); |
1226 | bh_unlock_sock(sk); | 1312 | release_sock(sk); |
1227 | } | 1313 | } |
1228 | 1314 | ||
1229 | static void l2cap_drop_acked_frames(struct l2cap_chan *chan) | 1315 | static void l2cap_drop_acked_frames(struct l2cap_chan *chan) |
@@ -1245,60 +1331,46 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan) | |||
1245 | __clear_retrans_timer(chan); | 1331 | __clear_retrans_timer(chan); |
1246 | } | 1332 | } |
1247 | 1333 | ||
1248 | static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) | ||
1249 | { | ||
1250 | struct hci_conn *hcon = chan->conn->hcon; | ||
1251 | u16 flags; | ||
1252 | |||
1253 | BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len); | ||
1254 | |||
1255 | if (!chan->flushable && lmp_no_flush_capable(hcon->hdev)) | ||
1256 | flags = ACL_START_NO_FLUSH; | ||
1257 | else | ||
1258 | flags = ACL_START; | ||
1259 | |||
1260 | bt_cb(skb)->force_active = chan->force_active; | ||
1261 | hci_send_acl(hcon, skb, flags); | ||
1262 | } | ||
1263 | |||
1264 | static void l2cap_streaming_send(struct l2cap_chan *chan) | 1334 | static void l2cap_streaming_send(struct l2cap_chan *chan) |
1265 | { | 1335 | { |
1266 | struct sk_buff *skb; | 1336 | struct sk_buff *skb; |
1267 | u16 control, fcs; | 1337 | u32 control; |
1338 | u16 fcs; | ||
1268 | 1339 | ||
1269 | while ((skb = skb_dequeue(&chan->tx_q))) { | 1340 | while ((skb = skb_dequeue(&chan->tx_q))) { |
1270 | control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); | 1341 | control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); |
1271 | control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; | 1342 | control |= __set_txseq(chan, chan->next_tx_seq); |
1272 | put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); | 1343 | __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); |
1273 | 1344 | ||
1274 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1345 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1275 | fcs = crc16(0, (u8 *)skb->data, skb->len - 2); | 1346 | fcs = crc16(0, (u8 *)skb->data, |
1276 | put_unaligned_le16(fcs, skb->data + skb->len - 2); | 1347 | skb->len - L2CAP_FCS_SIZE); |
1348 | put_unaligned_le16(fcs, | ||
1349 | skb->data + skb->len - L2CAP_FCS_SIZE); | ||
1277 | } | 1350 | } |
1278 | 1351 | ||
1279 | l2cap_do_send(chan, skb); | 1352 | l2cap_do_send(chan, skb); |
1280 | 1353 | ||
1281 | chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; | 1354 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); |
1282 | } | 1355 | } |
1283 | } | 1356 | } |
1284 | 1357 | ||
1285 | static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) | 1358 | static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) |
1286 | { | 1359 | { |
1287 | struct sk_buff *skb, *tx_skb; | 1360 | struct sk_buff *skb, *tx_skb; |
1288 | u16 control, fcs; | 1361 | u16 fcs; |
1362 | u32 control; | ||
1289 | 1363 | ||
1290 | skb = skb_peek(&chan->tx_q); | 1364 | skb = skb_peek(&chan->tx_q); |
1291 | if (!skb) | 1365 | if (!skb) |
1292 | return; | 1366 | return; |
1293 | 1367 | ||
1294 | do { | 1368 | while (bt_cb(skb)->tx_seq != tx_seq) { |
1295 | if (bt_cb(skb)->tx_seq == tx_seq) | ||
1296 | break; | ||
1297 | |||
1298 | if (skb_queue_is_last(&chan->tx_q, skb)) | 1369 | if (skb_queue_is_last(&chan->tx_q, skb)) |
1299 | return; | 1370 | return; |
1300 | 1371 | ||
1301 | } while ((skb = skb_queue_next(&chan->tx_q, skb))); | 1372 | skb = skb_queue_next(&chan->tx_q, skb); |
1373 | } | ||
1302 | 1374 | ||
1303 | if (chan->remote_max_tx && | 1375 | if (chan->remote_max_tx && |
1304 | bt_cb(skb)->retries == chan->remote_max_tx) { | 1376 | bt_cb(skb)->retries == chan->remote_max_tx) { |
@@ -1308,20 +1380,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) | |||
1308 | 1380 | ||
1309 | tx_skb = skb_clone(skb, GFP_ATOMIC); | 1381 | tx_skb = skb_clone(skb, GFP_ATOMIC); |
1310 | bt_cb(skb)->retries++; | 1382 | bt_cb(skb)->retries++; |
1311 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | 1383 | |
1312 | control &= L2CAP_CTRL_SAR; | 1384 | control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); |
1385 | control &= __get_sar_mask(chan); | ||
1313 | 1386 | ||
1314 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 1387 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
1315 | control |= L2CAP_CTRL_FINAL; | 1388 | control |= __set_ctrl_final(chan); |
1316 | 1389 | ||
1317 | control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) | 1390 | control |= __set_reqseq(chan, chan->buffer_seq); |
1318 | | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); | 1391 | control |= __set_txseq(chan, tx_seq); |
1319 | 1392 | ||
1320 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | 1393 | __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); |
1321 | 1394 | ||
1322 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1395 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1323 | fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); | 1396 | fcs = crc16(0, (u8 *)tx_skb->data, |
1324 | put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); | 1397 | tx_skb->len - L2CAP_FCS_SIZE); |
1398 | put_unaligned_le16(fcs, | ||
1399 | tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); | ||
1325 | } | 1400 | } |
1326 | 1401 | ||
1327 | l2cap_do_send(chan, tx_skb); | 1402 | l2cap_do_send(chan, tx_skb); |
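When a stored I-frame is retransmitted, only the SAR bits of the saved control word survive; ReqSeq, the F-bit and TxSeq are re-stamped for the current receive state before the FCS is recomputed over the rewritten frame. With the enhanced 16-bit layout (SAR in bits 14-15, TxSeq in bits 1-6, F at bit 7, ReqSeq in bits 8-13) the update looks like this stand-alone sketch:

#include <stdint.h>

#define CTRL_SAR_MASK     0xC000	/* bits 14-15, preserved from the stored frame */
#define CTRL_FINAL        0x0080	/* F bit */
#define CTRL_TXSEQ_SHIFT  1		/* bits 1-6 */
#define CTRL_REQSEQ_SHIFT 8		/* bits 8-13 */

static uint16_t restamp_iframe(uint16_t saved, uint8_t txseq, uint8_t reqseq, int final)
{
	uint16_t control = saved & CTRL_SAR_MASK;	/* keep segmentation bits only */

	control |= (uint16_t)(reqseq & 0x3f) << CTRL_REQSEQ_SHIFT;
	control |= (uint16_t)(txseq & 0x3f) << CTRL_TXSEQ_SHIFT;
	if (final)
		control |= CTRL_FINAL;
	return control;
}

The CRC-16 FCS is then recomputed over the whole L2CAP header and payload and written back into the trailing two bytes, which is what the crc16()/put_unaligned_le16() pair above does.
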
@@ -1330,7 +1405,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) | |||
1330 | static int l2cap_ertm_send(struct l2cap_chan *chan) | 1405 | static int l2cap_ertm_send(struct l2cap_chan *chan) |
1331 | { | 1406 | { |
1332 | struct sk_buff *skb, *tx_skb; | 1407 | struct sk_buff *skb, *tx_skb; |
1333 | u16 control, fcs; | 1408 | u16 fcs; |
1409 | u32 control; | ||
1334 | int nsent = 0; | 1410 | int nsent = 0; |
1335 | 1411 | ||
1336 | if (chan->state != BT_CONNECTED) | 1412 | if (chan->state != BT_CONNECTED) |
@@ -1348,20 +1424,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) | |||
1348 | 1424 | ||
1349 | bt_cb(skb)->retries++; | 1425 | bt_cb(skb)->retries++; |
1350 | 1426 | ||
1351 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | 1427 | control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); |
1352 | control &= L2CAP_CTRL_SAR; | 1428 | control &= __get_sar_mask(chan); |
1353 | 1429 | ||
1354 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) | 1430 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) |
1355 | control |= L2CAP_CTRL_FINAL; | 1431 | control |= __set_ctrl_final(chan); |
1356 | 1432 | ||
1357 | control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) | 1433 | control |= __set_reqseq(chan, chan->buffer_seq); |
1358 | | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); | 1434 | control |= __set_txseq(chan, chan->next_tx_seq); |
1359 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | ||
1360 | 1435 | ||
1436 | __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); | ||
1361 | 1437 | ||
1362 | if (chan->fcs == L2CAP_FCS_CRC16) { | 1438 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1363 | fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); | 1439 | fcs = crc16(0, (u8 *)skb->data, |
1364 | put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); | 1440 | tx_skb->len - L2CAP_FCS_SIZE); |
1441 | put_unaligned_le16(fcs, skb->data + | ||
1442 | tx_skb->len - L2CAP_FCS_SIZE); | ||
1365 | } | 1443 | } |
1366 | 1444 | ||
1367 | l2cap_do_send(chan, tx_skb); | 1445 | l2cap_do_send(chan, tx_skb); |
@@ -1369,7 +1447,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) | |||
1369 | __set_retrans_timer(chan); | 1447 | __set_retrans_timer(chan); |
1370 | 1448 | ||
1371 | bt_cb(skb)->tx_seq = chan->next_tx_seq; | 1449 | bt_cb(skb)->tx_seq = chan->next_tx_seq; |
1372 | chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; | 1450 | |
1451 | chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); | ||
1373 | 1452 | ||
1374 | if (bt_cb(skb)->retries == 1) | 1453 | if (bt_cb(skb)->retries == 1) |
1375 | chan->unacked_frames++; | 1454 | chan->unacked_frames++; |
@@ -1401,12 +1480,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan) | |||
1401 | 1480 | ||
1402 | static void l2cap_send_ack(struct l2cap_chan *chan) | 1481 | static void l2cap_send_ack(struct l2cap_chan *chan) |
1403 | { | 1482 | { |
1404 | u16 control = 0; | 1483 | u32 control = 0; |
1405 | 1484 | ||
1406 | control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 1485 | control |= __set_reqseq(chan, chan->buffer_seq); |
1407 | 1486 | ||
1408 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 1487 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
1409 | control |= L2CAP_SUPER_RCV_NOT_READY; | 1488 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); |
1410 | set_bit(CONN_RNR_SENT, &chan->conn_state); | 1489 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
1411 | l2cap_send_sframe(chan, control); | 1490 | l2cap_send_sframe(chan, control); |
1412 | return; | 1491 | return; |
@@ -1415,20 +1494,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan) | |||
1415 | if (l2cap_ertm_send(chan) > 0) | 1494 | if (l2cap_ertm_send(chan) > 0) |
1416 | return; | 1495 | return; |
1417 | 1496 | ||
1418 | control |= L2CAP_SUPER_RCV_READY; | 1497 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); |
1419 | l2cap_send_sframe(chan, control); | 1498 | l2cap_send_sframe(chan, control); |
1420 | } | 1499 | } |
1421 | 1500 | ||
1422 | static void l2cap_send_srejtail(struct l2cap_chan *chan) | 1501 | static void l2cap_send_srejtail(struct l2cap_chan *chan) |
1423 | { | 1502 | { |
1424 | struct srej_list *tail; | 1503 | struct srej_list *tail; |
1425 | u16 control; | 1504 | u32 control; |
1426 | 1505 | ||
1427 | control = L2CAP_SUPER_SELECT_REJECT; | 1506 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); |
1428 | control |= L2CAP_CTRL_FINAL; | 1507 | control |= __set_ctrl_final(chan); |
1429 | 1508 | ||
1430 | tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); | 1509 | tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); |
1431 | control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 1510 | control |= __set_reqseq(chan, tail->tx_seq); |
1432 | 1511 | ||
1433 | l2cap_send_sframe(chan, control); | 1512 | l2cap_send_sframe(chan, control); |
1434 | } | 1513 | } |
@@ -1456,6 +1535,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in | |||
1456 | if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) | 1535 | if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) |
1457 | return -EFAULT; | 1536 | return -EFAULT; |
1458 | 1537 | ||
1538 | (*frag)->priority = skb->priority; | ||
1539 | |||
1459 | sent += count; | 1540 | sent += count; |
1460 | len -= count; | 1541 | len -= count; |
1461 | 1542 | ||
@@ -1465,15 +1546,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in | |||
1465 | return sent; | 1546 | return sent; |
1466 | } | 1547 | } |
1467 | 1548 | ||
1468 | static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) | 1549 | static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, |
1550 | struct msghdr *msg, size_t len, | ||
1551 | u32 priority) | ||
1469 | { | 1552 | { |
1470 | struct sock *sk = chan->sk; | 1553 | struct sock *sk = chan->sk; |
1471 | struct l2cap_conn *conn = chan->conn; | 1554 | struct l2cap_conn *conn = chan->conn; |
1472 | struct sk_buff *skb; | 1555 | struct sk_buff *skb; |
1473 | int err, count, hlen = L2CAP_HDR_SIZE + 2; | 1556 | int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; |
1474 | struct l2cap_hdr *lh; | 1557 | struct l2cap_hdr *lh; |
1475 | 1558 | ||
1476 | BT_DBG("sk %p len %d", sk, (int)len); | 1559 | BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); |
1477 | 1560 | ||
1478 | count = min_t(unsigned int, (conn->mtu - hlen), len); | 1561 | count = min_t(unsigned int, (conn->mtu - hlen), len); |
1479 | skb = bt_skb_send_alloc(sk, count + hlen, | 1562 | skb = bt_skb_send_alloc(sk, count + hlen, |
@@ -1481,6 +1564,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct | |||
1481 | if (!skb) | 1564 | if (!skb) |
1482 | return ERR_PTR(err); | 1565 | return ERR_PTR(err); |
1483 | 1566 | ||
1567 | skb->priority = priority; | ||
1568 | |||
1484 | /* Create L2CAP header */ | 1569 | /* Create L2CAP header */ |
1485 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 1570 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
1486 | lh->cid = cpu_to_le16(chan->dcid); | 1571 | lh->cid = cpu_to_le16(chan->dcid); |
@@ -1495,7 +1580,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct | |||
1495 | return skb; | 1580 | return skb; |
1496 | } | 1581 | } |
1497 | 1582 | ||
1498 | static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) | 1583 | static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, |
1584 | struct msghdr *msg, size_t len, | ||
1585 | u32 priority) | ||
1499 | { | 1586 | { |
1500 | struct sock *sk = chan->sk; | 1587 | struct sock *sk = chan->sk; |
1501 | struct l2cap_conn *conn = chan->conn; | 1588 | struct l2cap_conn *conn = chan->conn; |
@@ -1511,6 +1598,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms | |||
1511 | if (!skb) | 1598 | if (!skb) |
1512 | return ERR_PTR(err); | 1599 | return ERR_PTR(err); |
1513 | 1600 | ||
1601 | skb->priority = priority; | ||
1602 | |||
1514 | /* Create L2CAP header */ | 1603 | /* Create L2CAP header */ |
1515 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 1604 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
1516 | lh->cid = cpu_to_le16(chan->dcid); | 1605 | lh->cid = cpu_to_le16(chan->dcid); |
@@ -1526,12 +1615,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms | |||
1526 | 1615 | ||
1527 | static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | 1616 | static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, |
1528 | struct msghdr *msg, size_t len, | 1617 | struct msghdr *msg, size_t len, |
1529 | u16 control, u16 sdulen) | 1618 | u32 control, u16 sdulen) |
1530 | { | 1619 | { |
1531 | struct sock *sk = chan->sk; | 1620 | struct sock *sk = chan->sk; |
1532 | struct l2cap_conn *conn = chan->conn; | 1621 | struct l2cap_conn *conn = chan->conn; |
1533 | struct sk_buff *skb; | 1622 | struct sk_buff *skb; |
1534 | int err, count, hlen = L2CAP_HDR_SIZE + 2; | 1623 | int err, count, hlen; |
1535 | struct l2cap_hdr *lh; | 1624 | struct l2cap_hdr *lh; |
1536 | 1625 | ||
1537 | BT_DBG("sk %p len %d", sk, (int)len); | 1626 | BT_DBG("sk %p len %d", sk, (int)len); |
@@ -1539,11 +1628,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1539 | if (!conn) | 1628 | if (!conn) |
1540 | return ERR_PTR(-ENOTCONN); | 1629 | return ERR_PTR(-ENOTCONN); |
1541 | 1630 | ||
1631 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
1632 | hlen = L2CAP_EXT_HDR_SIZE; | ||
1633 | else | ||
1634 | hlen = L2CAP_ENH_HDR_SIZE; | ||
1635 | |||
1542 | if (sdulen) | 1636 | if (sdulen) |
1543 | hlen += 2; | 1637 | hlen += L2CAP_SDULEN_SIZE; |
1544 | 1638 | ||
1545 | if (chan->fcs == L2CAP_FCS_CRC16) | 1639 | if (chan->fcs == L2CAP_FCS_CRC16) |
1546 | hlen += 2; | 1640 | hlen += L2CAP_FCS_SIZE; |
1547 | 1641 | ||
1548 | count = min_t(unsigned int, (conn->mtu - hlen), len); | 1642 | count = min_t(unsigned int, (conn->mtu - hlen), len); |
1549 | skb = bt_skb_send_alloc(sk, count + hlen, | 1643 | skb = bt_skb_send_alloc(sk, count + hlen, |
@@ -1555,9 +1649,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1555 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 1649 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
1556 | lh->cid = cpu_to_le16(chan->dcid); | 1650 | lh->cid = cpu_to_le16(chan->dcid); |
1557 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | 1651 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); |
1558 | put_unaligned_le16(control, skb_put(skb, 2)); | 1652 | |
1653 | __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); | ||
1654 | |||
1559 | if (sdulen) | 1655 | if (sdulen) |
1560 | put_unaligned_le16(sdulen, skb_put(skb, 2)); | 1656 | put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); |
1561 | 1657 | ||
1562 | err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); | 1658 | err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); |
1563 | if (unlikely(err < 0)) { | 1659 | if (unlikely(err < 0)) { |
@@ -1566,7 +1662,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, | |||
1566 | } | 1662 | } |
1567 | 1663 | ||
1568 | if (chan->fcs == L2CAP_FCS_CRC16) | 1664 | if (chan->fcs == L2CAP_FCS_CRC16) |
1569 | put_unaligned_le16(0, skb_put(skb, 2)); | 1665 | put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); |
1570 | 1666 | ||
1571 | bt_cb(skb)->retries = 0; | 1667 | bt_cb(skb)->retries = 0; |
1572 | return skb; | 1668 | return skb; |
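The I-frame header is now sized symbolically rather than with literal 2s: the basic 4-byte L2CAP header plus a 2- or 4-byte control field depending on FLAG_EXT_CTRL, an optional 2-byte SDU length (present only on the first PDU of a segmented SDU) and an optional 2-byte FCS. A small stand-alone restatement of that calculation, using local constants in place of the L2CAP_*_SIZE macros from l2cap.h:

#include <stddef.h>

#define HDR_SIZE    4	/* length + CID */
#define ENH_CTRL    2	/* enhanced control field */
#define EXT_CTRL    4	/* extended control field */
#define SDULEN_SIZE 2
#define FCS_SIZE    2

static size_t iframe_hlen(int ext_ctrl, int first_segment, int use_fcs)
{
	size_t hlen = HDR_SIZE + (ext_ctrl ? EXT_CTRL : ENH_CTRL);

	if (first_segment)
		hlen += SDULEN_SIZE;	/* SDU length travels only in the START PDU */
	if (use_fcs)
		hlen += FCS_SIZE;
	return hlen;
}

The payload carried by one PDU is then min(conn->mtu - hlen, len), exactly the count computed above.
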
@@ -1576,11 +1672,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si | |||
1576 | { | 1672 | { |
1577 | struct sk_buff *skb; | 1673 | struct sk_buff *skb; |
1578 | struct sk_buff_head sar_queue; | 1674 | struct sk_buff_head sar_queue; |
1579 | u16 control; | 1675 | u32 control; |
1580 | size_t size = 0; | 1676 | size_t size = 0; |
1581 | 1677 | ||
1582 | skb_queue_head_init(&sar_queue); | 1678 | skb_queue_head_init(&sar_queue); |
1583 | control = L2CAP_SDU_START; | 1679 | control = __set_ctrl_sar(chan, L2CAP_SAR_START); |
1584 | skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); | 1680 | skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); |
1585 | if (IS_ERR(skb)) | 1681 | if (IS_ERR(skb)) |
1586 | return PTR_ERR(skb); | 1682 | return PTR_ERR(skb); |
@@ -1593,10 +1689,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si | |||
1593 | size_t buflen; | 1689 | size_t buflen; |
1594 | 1690 | ||
1595 | if (len > chan->remote_mps) { | 1691 | if (len > chan->remote_mps) { |
1596 | control = L2CAP_SDU_CONTINUE; | 1692 | control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); |
1597 | buflen = chan->remote_mps; | 1693 | buflen = chan->remote_mps; |
1598 | } else { | 1694 | } else { |
1599 | control = L2CAP_SDU_END; | 1695 | control = __set_ctrl_sar(chan, L2CAP_SAR_END); |
1600 | buflen = len; | 1696 | buflen = len; |
1601 | } | 1697 | } |
1602 | 1698 | ||
@@ -1617,15 +1713,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si | |||
1617 | return size; | 1713 | return size; |
1618 | } | 1714 | } |
1619 | 1715 | ||
1620 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) | 1716 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, |
1717 | u32 priority) | ||
1621 | { | 1718 | { |
1622 | struct sk_buff *skb; | 1719 | struct sk_buff *skb; |
1623 | u16 control; | 1720 | u32 control; |
1624 | int err; | 1721 | int err; |
1625 | 1722 | ||
1626 | /* Connectionless channel */ | 1723 | /* Connectionless channel */ |
1627 | if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { | 1724 | if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { |
1628 | skb = l2cap_create_connless_pdu(chan, msg, len); | 1725 | skb = l2cap_create_connless_pdu(chan, msg, len, priority); |
1629 | if (IS_ERR(skb)) | 1726 | if (IS_ERR(skb)) |
1630 | return PTR_ERR(skb); | 1727 | return PTR_ERR(skb); |
1631 | 1728 | ||
@@ -1640,7 +1737,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) | |||
1640 | return -EMSGSIZE; | 1737 | return -EMSGSIZE; |
1641 | 1738 | ||
1642 | /* Create a basic PDU */ | 1739 | /* Create a basic PDU */ |
1643 | skb = l2cap_create_basic_pdu(chan, msg, len); | 1740 | skb = l2cap_create_basic_pdu(chan, msg, len, priority); |
1644 | if (IS_ERR(skb)) | 1741 | if (IS_ERR(skb)) |
1645 | return PTR_ERR(skb); | 1742 | return PTR_ERR(skb); |
1646 | 1743 | ||
@@ -1652,7 +1749,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) | |||
1652 | case L2CAP_MODE_STREAMING: | 1749 | case L2CAP_MODE_STREAMING: |
1653 | /* Entire SDU fits into one PDU */ | 1750 | /* Entire SDU fits into one PDU */ |
1654 | if (len <= chan->remote_mps) { | 1751 | if (len <= chan->remote_mps) { |
1655 | control = L2CAP_SDU_UNSEGMENTED; | 1752 | control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); |
1656 | skb = l2cap_create_iframe_pdu(chan, msg, len, control, | 1753 | skb = l2cap_create_iframe_pdu(chan, msg, len, control, |
1657 | 0); | 1754 | 0); |
1658 | if (IS_ERR(skb)) | 1755 | if (IS_ERR(skb)) |
@@ -1704,8 +1801,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1704 | 1801 | ||
1705 | BT_DBG("conn %p", conn); | 1802 | BT_DBG("conn %p", conn); |
1706 | 1803 | ||
1707 | read_lock(&conn->chan_lock); | 1804 | rcu_read_lock(); |
1708 | list_for_each_entry(chan, &conn->chan_l, list) { | 1805 | |
1806 | list_for_each_entry_rcu(chan, &conn->chan_l, list) { | ||
1709 | struct sock *sk = chan->sk; | 1807 | struct sock *sk = chan->sk; |
1710 | if (chan->chan_type != L2CAP_CHAN_RAW) | 1808 | if (chan->chan_type != L2CAP_CHAN_RAW) |
1711 | continue; | 1809 | continue; |
@@ -1720,7 +1818,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1720 | if (chan->ops->recv(chan->data, nskb)) | 1818 | if (chan->ops->recv(chan->data, nskb)) |
1721 | kfree_skb(nskb); | 1819 | kfree_skb(nskb); |
1722 | } | 1820 | } |
1723 | read_unlock(&conn->chan_lock); | 1821 | |
1822 | rcu_read_unlock(); | ||
1724 | } | 1823 | } |
1725 | 1824 | ||
1726 | /* ---- L2CAP signalling commands ---- */ | 1825 | /* ---- L2CAP signalling commands ---- */ |
@@ -1850,37 +1949,64 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) | |||
1850 | *ptr += L2CAP_CONF_OPT_SIZE + len; | 1949 | *ptr += L2CAP_CONF_OPT_SIZE + len; |
1851 | } | 1950 | } |
1852 | 1951 | ||
1853 | static void l2cap_ack_timeout(unsigned long arg) | 1952 | static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) |
1854 | { | 1953 | { |
1855 | struct l2cap_chan *chan = (void *) arg; | 1954 | struct l2cap_conf_efs efs; |
1955 | |||
1956 | switch (chan->mode) { | ||
1957 | case L2CAP_MODE_ERTM: | ||
1958 | efs.id = chan->local_id; | ||
1959 | efs.stype = chan->local_stype; | ||
1960 | efs.msdu = cpu_to_le16(chan->local_msdu); | ||
1961 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); | ||
1962 | efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); | ||
1963 | efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); | ||
1964 | break; | ||
1965 | |||
1966 | case L2CAP_MODE_STREAMING: | ||
1967 | efs.id = 1; | ||
1968 | efs.stype = L2CAP_SERV_BESTEFFORT; | ||
1969 | efs.msdu = cpu_to_le16(chan->local_msdu); | ||
1970 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); | ||
1971 | efs.acc_lat = 0; | ||
1972 | efs.flush_to = 0; | ||
1973 | break; | ||
1974 | |||
1975 | default: | ||
1976 | return; | ||
1977 | } | ||
1978 | |||
1979 | l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), | ||
1980 | (unsigned long) &efs); | ||
1981 | } | ||
1856 | 1982 | ||
1857 | bh_lock_sock(chan->sk); | 1983 | static void l2cap_ack_timeout(struct work_struct *work) |
1984 | { | ||
1985 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, | ||
1986 | ack_timer.work); | ||
1987 | |||
1988 | BT_DBG("chan %p", chan); | ||
1989 | |||
1990 | lock_sock(chan->sk); | ||
1858 | l2cap_send_ack(chan); | 1991 | l2cap_send_ack(chan); |
1859 | bh_unlock_sock(chan->sk); | 1992 | release_sock(chan->sk); |
1860 | } | 1993 | } |
1861 | 1994 | ||
1862 | static inline void l2cap_ertm_init(struct l2cap_chan *chan) | 1995 | static inline void l2cap_ertm_init(struct l2cap_chan *chan) |
1863 | { | 1996 | { |
1864 | struct sock *sk = chan->sk; | ||
1865 | |||
1866 | chan->expected_ack_seq = 0; | 1997 | chan->expected_ack_seq = 0; |
1867 | chan->unacked_frames = 0; | 1998 | chan->unacked_frames = 0; |
1868 | chan->buffer_seq = 0; | 1999 | chan->buffer_seq = 0; |
1869 | chan->num_acked = 0; | 2000 | chan->num_acked = 0; |
1870 | chan->frames_sent = 0; | 2001 | chan->frames_sent = 0; |
1871 | 2002 | ||
1872 | setup_timer(&chan->retrans_timer, l2cap_retrans_timeout, | 2003 | INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); |
1873 | (unsigned long) chan); | 2004 | INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); |
1874 | setup_timer(&chan->monitor_timer, l2cap_monitor_timeout, | 2005 | INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); |
1875 | (unsigned long) chan); | ||
1876 | setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan); | ||
1877 | 2006 | ||
1878 | skb_queue_head_init(&chan->srej_q); | 2007 | skb_queue_head_init(&chan->srej_q); |
1879 | 2008 | ||
1880 | INIT_LIST_HEAD(&chan->srej_l); | 2009 | INIT_LIST_HEAD(&chan->srej_l); |
1881 | |||
1882 | |||
1883 | sk->sk_backlog_rcv = l2cap_ertm_data_rcv; | ||
1884 | } | 2010 | } |
1885 | 2011 | ||
1886 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) | 2012 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) |
@@ -1896,11 +2022,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) | |||
1896 | } | 2022 | } |
1897 | } | 2023 | } |
1898 | 2024 | ||
2025 | static inline bool __l2cap_ews_supported(struct l2cap_chan *chan) | ||
2026 | { | ||
2027 | return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; | ||
2028 | } | ||
2029 | |||
2030 | static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) | ||
2031 | { | ||
2032 | return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; | ||
2033 | } | ||
2034 | |||
2035 | static inline void l2cap_txwin_setup(struct l2cap_chan *chan) | ||
2036 | { | ||
2037 | if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && | ||
2038 | __l2cap_ews_supported(chan)) { | ||
2039 | /* use extended control field */ | ||
2040 | set_bit(FLAG_EXT_CTRL, &chan->flags); | ||
2041 | chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; | ||
2042 | } else { | ||
2043 | chan->tx_win = min_t(u16, chan->tx_win, | ||
2044 | L2CAP_DEFAULT_TX_WINDOW); | ||
2045 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; | ||
2046 | } | ||
2047 | } | ||
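l2cap_txwin_setup() above picks between the two control-field formats. A rough illustration, assuming the usual constants (standard ERTM carries 6-bit sequence numbers, so L2CAP_DEFAULT_TX_WINDOW is 63; the extended control field carries 14 bits, so L2CAP_DEFAULT_EXT_WINDOW is 0x3fff):

	/* illustration only */
	chan->tx_win = 100;		/* caller asked for more than 63 frames */
	l2cap_txwin_setup(chan);

	/* extended window supported:  FLAG_EXT_CTRL set, tx_win stays 100,
	 *                             tx_win_max = 0x3fff
	 * not supported:              tx_win clamped to 63, tx_win_max = 63
	 */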
2048 | |||
1899 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) | 2049 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) |
1900 | { | 2050 | { |
1901 | struct l2cap_conf_req *req = data; | 2051 | struct l2cap_conf_req *req = data; |
1902 | struct l2cap_conf_rfc rfc = { .mode = chan->mode }; | 2052 | struct l2cap_conf_rfc rfc = { .mode = chan->mode }; |
1903 | void *ptr = req->data; | 2053 | void *ptr = req->data; |
2054 | u16 size; | ||
1904 | 2055 | ||
1905 | BT_DBG("chan %p", chan); | 2056 | BT_DBG("chan %p", chan); |
1906 | 2057 | ||
@@ -1913,6 +2064,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) | |||
1913 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) | 2064 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) |
1914 | break; | 2065 | break; |
1915 | 2066 | ||
2067 | if (__l2cap_efs_supported(chan)) | ||
2068 | set_bit(FLAG_EFS_ENABLE, &chan->flags); | ||
2069 | |||
1916 | /* fall through */ | 2070 | /* fall through */ |
1917 | default: | 2071 | default: |
1918 | chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); | 2072 | chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); |
@@ -1942,17 +2096,27 @@ done: | |||
1942 | 2096 | ||
1943 | case L2CAP_MODE_ERTM: | 2097 | case L2CAP_MODE_ERTM: |
1944 | rfc.mode = L2CAP_MODE_ERTM; | 2098 | rfc.mode = L2CAP_MODE_ERTM; |
1945 | rfc.txwin_size = chan->tx_win; | ||
1946 | rfc.max_transmit = chan->max_tx; | 2099 | rfc.max_transmit = chan->max_tx; |
1947 | rfc.retrans_timeout = 0; | 2100 | rfc.retrans_timeout = 0; |
1948 | rfc.monitor_timeout = 0; | 2101 | rfc.monitor_timeout = 0; |
1949 | rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); | 2102 | |
1950 | if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) | 2103 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
1951 | rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); | 2104 | L2CAP_EXT_HDR_SIZE - |
2105 | L2CAP_SDULEN_SIZE - | ||
2106 | L2CAP_FCS_SIZE); | ||
2107 | rfc.max_pdu_size = cpu_to_le16(size); | ||
2108 | |||
2109 | l2cap_txwin_setup(chan); | ||
2110 | |||
2111 | rfc.txwin_size = min_t(u16, chan->tx_win, | ||
2112 | L2CAP_DEFAULT_TX_WINDOW); | ||
1952 | 2113 | ||
1953 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), | 2114 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
1954 | (unsigned long) &rfc); | 2115 | (unsigned long) &rfc); |
1955 | 2116 | ||
2117 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) | ||
2118 | l2cap_add_opt_efs(&ptr, chan); | ||
2119 | |||
1956 | if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) | 2120 | if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) |
1957 | break; | 2121 | break; |
1958 | 2122 | ||
@@ -1961,6 +2125,10 @@ done: | |||
1961 | chan->fcs = L2CAP_FCS_NONE; | 2125 | chan->fcs = L2CAP_FCS_NONE; |
1962 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); | 2126 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); |
1963 | } | 2127 | } |
2128 | |||
2129 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
2130 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, | ||
2131 | chan->tx_win); | ||
1964 | break; | 2132 | break; |
1965 | 2133 | ||
1966 | case L2CAP_MODE_STREAMING: | 2134 | case L2CAP_MODE_STREAMING: |
@@ -1969,13 +2137,19 @@ done: | |||
1969 | rfc.max_transmit = 0; | 2137 | rfc.max_transmit = 0; |
1970 | rfc.retrans_timeout = 0; | 2138 | rfc.retrans_timeout = 0; |
1971 | rfc.monitor_timeout = 0; | 2139 | rfc.monitor_timeout = 0; |
1972 | rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); | 2140 | |
1973 | if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) | 2141 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
1974 | rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); | 2142 | L2CAP_EXT_HDR_SIZE - |
2143 | L2CAP_SDULEN_SIZE - | ||
2144 | L2CAP_FCS_SIZE); | ||
2145 | rfc.max_pdu_size = cpu_to_le16(size); | ||
1975 | 2146 | ||
1976 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), | 2147 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
1977 | (unsigned long) &rfc); | 2148 | (unsigned long) &rfc); |
1978 | 2149 | ||
2150 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) | ||
2151 | l2cap_add_opt_efs(&ptr, chan); | ||
2152 | |||
1979 | if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) | 2153 | if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) |
1980 | break; | 2154 | break; |
1981 | 2155 | ||
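The size computation above replaces the old fixed "mtu - 10" with the worst-case per-PDU overhead. A worked example with assumed constants (L2CAP_EXT_HDR_SIZE = 8, L2CAP_SDULEN_SIZE = 2, L2CAP_FCS_SIZE = 2, L2CAP_DEFAULT_MAX_PDU_SIZE = 1009) and an illustrative ACL MTU of 672:

	size = min_t(u16, 1009, 672 - 8 - 2 - 2);	/* min(1009, 660) = 660 */
	rfc.max_pdu_size = cpu_to_le16(size);

so every I-frame still fits the ACL MTU even when the extended control field, the SDU length field and the FCS are all present.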
@@ -2002,8 +2176,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) | |||
2002 | int type, hint, olen; | 2176 | int type, hint, olen; |
2003 | unsigned long val; | 2177 | unsigned long val; |
2004 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; | 2178 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; |
2179 | struct l2cap_conf_efs efs; | ||
2180 | u8 remote_efs = 0; | ||
2005 | u16 mtu = L2CAP_DEFAULT_MTU; | 2181 | u16 mtu = L2CAP_DEFAULT_MTU; |
2006 | u16 result = L2CAP_CONF_SUCCESS; | 2182 | u16 result = L2CAP_CONF_SUCCESS; |
2183 | u16 size; | ||
2007 | 2184 | ||
2008 | BT_DBG("chan %p", chan); | 2185 | BT_DBG("chan %p", chan); |
2009 | 2186 | ||
@@ -2033,7 +2210,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) | |||
2033 | case L2CAP_CONF_FCS: | 2210 | case L2CAP_CONF_FCS: |
2034 | if (val == L2CAP_FCS_NONE) | 2211 | if (val == L2CAP_FCS_NONE) |
2035 | set_bit(CONF_NO_FCS_RECV, &chan->conf_state); | 2212 | set_bit(CONF_NO_FCS_RECV, &chan->conf_state); |
2213 | break; | ||
2036 | 2214 | ||
2215 | case L2CAP_CONF_EFS: | ||
2216 | remote_efs = 1; | ||
2217 | if (olen == sizeof(efs)) | ||
2218 | memcpy(&efs, (void *) val, olen); | ||
2219 | break; | ||
2220 | |||
2221 | case L2CAP_CONF_EWS: | ||
2222 | if (!enable_hs) | ||
2223 | return -ECONNREFUSED; | ||
2224 | |||
2225 | set_bit(FLAG_EXT_CTRL, &chan->flags); | ||
2226 | set_bit(CONF_EWS_RECV, &chan->conf_state); | ||
2227 | chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; | ||
2228 | chan->remote_tx_win = val; | ||
2037 | break; | 2229 | break; |
2038 | 2230 | ||
2039 | default: | 2231 | default: |
@@ -2058,6 +2250,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) | |||
2058 | break; | 2250 | break; |
2059 | } | 2251 | } |
2060 | 2252 | ||
2253 | if (remote_efs) { | ||
2254 | if (__l2cap_efs_supported(chan)) | ||
2255 | set_bit(FLAG_EFS_ENABLE, &chan->flags); | ||
2256 | else | ||
2257 | return -ECONNREFUSED; | ||
2258 | } | ||
2259 | |||
2061 | if (chan->mode != rfc.mode) | 2260 | if (chan->mode != rfc.mode) |
2062 | return -ECONNREFUSED; | 2261 | return -ECONNREFUSED; |
2063 | 2262 | ||
@@ -2076,7 +2275,6 @@ done: | |||
2076 | sizeof(rfc), (unsigned long) &rfc); | 2275 | sizeof(rfc), (unsigned long) &rfc); |
2077 | } | 2276 | } |
2078 | 2277 | ||
2079 | |||
2080 | if (result == L2CAP_CONF_SUCCESS) { | 2278 | if (result == L2CAP_CONF_SUCCESS) { |
2081 | /* Configure output options and let the other side know | 2279 | /* Configure output options and let the other side know |
2082 | * which ones we don't like. */ | 2280 | * which ones we don't like. */ |
@@ -2089,6 +2287,26 @@ done: | |||
2089 | } | 2287 | } |
2090 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); | 2288 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); |
2091 | 2289 | ||
2290 | if (remote_efs) { | ||
2291 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | ||
2292 | efs.stype != L2CAP_SERV_NOTRAFIC && | ||
2293 | efs.stype != chan->local_stype) { | ||
2294 | |||
2295 | result = L2CAP_CONF_UNACCEPT; | ||
2296 | |||
2297 | if (chan->num_conf_req >= 1) | ||
2298 | return -ECONNREFUSED; | ||
2299 | |||
2300 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | ||
2301 | sizeof(efs), | ||
2302 | (unsigned long) &efs); | ||
2303 | } else { | ||
2304 | /* Send PENDING Conf Rsp */ | ||
2305 | result = L2CAP_CONF_PENDING; | ||
2306 | set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | ||
2307 | } | ||
2308 | } | ||
2309 | |||
2092 | switch (rfc.mode) { | 2310 | switch (rfc.mode) { |
2093 | case L2CAP_MODE_BASIC: | 2311 | case L2CAP_MODE_BASIC: |
2094 | chan->fcs = L2CAP_FCS_NONE; | 2312 | chan->fcs = L2CAP_FCS_NONE; |
@@ -2096,13 +2314,20 @@ done: | |||
2096 | break; | 2314 | break; |
2097 | 2315 | ||
2098 | case L2CAP_MODE_ERTM: | 2316 | case L2CAP_MODE_ERTM: |
2099 | chan->remote_tx_win = rfc.txwin_size; | 2317 | if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) |
2100 | chan->remote_max_tx = rfc.max_transmit; | 2318 | chan->remote_tx_win = rfc.txwin_size; |
2319 | else | ||
2320 | rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; | ||
2101 | 2321 | ||
2102 | if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) | 2322 | chan->remote_max_tx = rfc.max_transmit; |
2103 | rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); | ||
2104 | 2323 | ||
2105 | chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); | 2324 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
2325 | chan->conn->mtu - | ||
2326 | L2CAP_EXT_HDR_SIZE - | ||
2327 | L2CAP_SDULEN_SIZE - | ||
2328 | L2CAP_FCS_SIZE); | ||
2329 | rfc.max_pdu_size = cpu_to_le16(size); | ||
2330 | chan->remote_mps = size; | ||
2106 | 2331 | ||
2107 | rfc.retrans_timeout = | 2332 | rfc.retrans_timeout = |
2108 | le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); | 2333 | le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); |
@@ -2114,13 +2339,29 @@ done: | |||
2114 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 2339 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, |
2115 | sizeof(rfc), (unsigned long) &rfc); | 2340 | sizeof(rfc), (unsigned long) &rfc); |
2116 | 2341 | ||
2342 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { | ||
2343 | chan->remote_id = efs.id; | ||
2344 | chan->remote_stype = efs.stype; | ||
2345 | chan->remote_msdu = le16_to_cpu(efs.msdu); | ||
2346 | chan->remote_flush_to = | ||
2347 | le32_to_cpu(efs.flush_to); | ||
2348 | chan->remote_acc_lat = | ||
2349 | le32_to_cpu(efs.acc_lat); | ||
2350 | chan->remote_sdu_itime = | ||
2351 | le32_to_cpu(efs.sdu_itime); | ||
2352 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | ||
2353 | sizeof(efs), (unsigned long) &efs); | ||
2354 | } | ||
2117 | break; | 2355 | break; |
2118 | 2356 | ||
2119 | case L2CAP_MODE_STREAMING: | 2357 | case L2CAP_MODE_STREAMING: |
2120 | if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) | 2358 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
2121 | rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); | 2359 | chan->conn->mtu - |
2122 | 2360 | L2CAP_EXT_HDR_SIZE - | |
2123 | chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); | 2361 | L2CAP_SDULEN_SIZE - |
2362 | L2CAP_FCS_SIZE); | ||
2363 | rfc.max_pdu_size = cpu_to_le16(size); | ||
2364 | chan->remote_mps = size; | ||
2124 | 2365 | ||
2125 | set_bit(CONF_MODE_DONE, &chan->conf_state); | 2366 | set_bit(CONF_MODE_DONE, &chan->conf_state); |
2126 | 2367 | ||
@@ -2153,6 +2394,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
2153 | int type, olen; | 2394 | int type, olen; |
2154 | unsigned long val; | 2395 | unsigned long val; |
2155 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; | 2396 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; |
2397 | struct l2cap_conf_efs efs; | ||
2156 | 2398 | ||
2157 | BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); | 2399 | BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); |
2158 | 2400 | ||
@@ -2188,6 +2430,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
2188 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 2430 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, |
2189 | sizeof(rfc), (unsigned long) &rfc); | 2431 | sizeof(rfc), (unsigned long) &rfc); |
2190 | break; | 2432 | break; |
2433 | |||
2434 | case L2CAP_CONF_EWS: | ||
2435 | chan->tx_win = min_t(u16, val, | ||
2436 | L2CAP_DEFAULT_EXT_WINDOW); | ||
2437 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, | ||
2438 | chan->tx_win); | ||
2439 | break; | ||
2440 | |||
2441 | case L2CAP_CONF_EFS: | ||
2442 | if (olen == sizeof(efs)) | ||
2443 | memcpy(&efs, (void *)val, olen); | ||
2444 | |||
2445 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | ||
2446 | efs.stype != L2CAP_SERV_NOTRAFIC && | ||
2447 | efs.stype != chan->local_stype) | ||
2448 | return -ECONNREFUSED; | ||
2449 | |||
2450 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | ||
2451 | sizeof(efs), (unsigned long) &efs); | ||
2452 | break; | ||
2191 | } | 2453 | } |
2192 | } | 2454 | } |
2193 | 2455 | ||
@@ -2196,13 +2458,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
2196 | 2458 | ||
2197 | chan->mode = rfc.mode; | 2459 | chan->mode = rfc.mode; |
2198 | 2460 | ||
2199 | if (*result == L2CAP_CONF_SUCCESS) { | 2461 | if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { |
2200 | switch (rfc.mode) { | 2462 | switch (rfc.mode) { |
2201 | case L2CAP_MODE_ERTM: | 2463 | case L2CAP_MODE_ERTM: |
2202 | chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); | 2464 | chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); |
2203 | chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); | 2465 | chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); |
2204 | chan->mps = le16_to_cpu(rfc.max_pdu_size); | 2466 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
2467 | |||
2468 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { | ||
2469 | chan->local_msdu = le16_to_cpu(efs.msdu); | ||
2470 | chan->local_sdu_itime = | ||
2471 | le32_to_cpu(efs.sdu_itime); | ||
2472 | chan->local_acc_lat = le32_to_cpu(efs.acc_lat); | ||
2473 | chan->local_flush_to = | ||
2474 | le32_to_cpu(efs.flush_to); | ||
2475 | } | ||
2205 | break; | 2476 | break; |
2477 | |||
2206 | case L2CAP_MODE_STREAMING: | 2478 | case L2CAP_MODE_STREAMING: |
2207 | chan->mps = le16_to_cpu(rfc.max_pdu_size); | 2479 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
2208 | } | 2480 | } |
@@ -2302,7 +2574,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2302 | 2574 | ||
2303 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && | 2575 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && |
2304 | cmd->ident == conn->info_ident) { | 2576 | cmd->ident == conn->info_ident) { |
2305 | del_timer(&conn->info_timer); | 2577 | __cancel_delayed_work(&conn->info_timer); |
2306 | 2578 | ||
2307 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 2579 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
2308 | conn->info_ident = 0; | 2580 | conn->info_ident = 0; |
@@ -2335,12 +2607,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2335 | 2607 | ||
2336 | parent = pchan->sk; | 2608 | parent = pchan->sk; |
2337 | 2609 | ||
2338 | bh_lock_sock(parent); | 2610 | lock_sock(parent); |
2339 | 2611 | ||
2340 | /* Check if the ACL is secure enough (if not SDP) */ | 2612 | /* Check if the ACL is secure enough (if not SDP) */ |
2341 | if (psm != cpu_to_le16(0x0001) && | 2613 | if (psm != cpu_to_le16(0x0001) && |
2342 | !hci_conn_check_link_mode(conn->hcon)) { | 2614 | !hci_conn_check_link_mode(conn->hcon)) { |
2343 | conn->disc_reason = 0x05; | 2615 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; |
2344 | result = L2CAP_CR_SEC_BLOCK; | 2616 | result = L2CAP_CR_SEC_BLOCK; |
2345 | goto response; | 2617 | goto response; |
2346 | } | 2618 | } |
@@ -2359,11 +2631,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2359 | 2631 | ||
2360 | sk = chan->sk; | 2632 | sk = chan->sk; |
2361 | 2633 | ||
2362 | write_lock_bh(&conn->chan_lock); | ||
2363 | |||
2364 | /* Check if we already have channel with that dcid */ | 2634 | /* Check if we already have channel with that dcid */ |
2365 | if (__l2cap_get_chan_by_dcid(conn, scid)) { | 2635 | if (__l2cap_get_chan_by_dcid(conn, scid)) { |
2366 | write_unlock_bh(&conn->chan_lock); | ||
2367 | sock_set_flag(sk, SOCK_ZAPPED); | 2636 | sock_set_flag(sk, SOCK_ZAPPED); |
2368 | chan->ops->close(chan->data); | 2637 | chan->ops->close(chan->data); |
2369 | goto response; | 2638 | goto response; |
@@ -2378,7 +2647,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2378 | 2647 | ||
2379 | bt_accept_enqueue(parent, sk); | 2648 | bt_accept_enqueue(parent, sk); |
2380 | 2649 | ||
2381 | __l2cap_chan_add(conn, chan); | 2650 | l2cap_chan_add(conn, chan); |
2382 | 2651 | ||
2383 | dcid = chan->scid; | 2652 | dcid = chan->scid; |
2384 | 2653 | ||
@@ -2387,7 +2656,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2387 | chan->ident = cmd->ident; | 2656 | chan->ident = cmd->ident; |
2388 | 2657 | ||
2389 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { | 2658 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { |
2390 | if (l2cap_check_security(chan)) { | 2659 | if (l2cap_chan_check_security(chan)) { |
2391 | if (bt_sk(sk)->defer_setup) { | 2660 | if (bt_sk(sk)->defer_setup) { |
2392 | l2cap_state_change(chan, BT_CONNECT2); | 2661 | l2cap_state_change(chan, BT_CONNECT2); |
2393 | result = L2CAP_CR_PEND; | 2662 | result = L2CAP_CR_PEND; |
@@ -2409,10 +2678,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2409 | status = L2CAP_CS_NO_INFO; | 2678 | status = L2CAP_CS_NO_INFO; |
2410 | } | 2679 | } |
2411 | 2680 | ||
2412 | write_unlock_bh(&conn->chan_lock); | ||
2413 | |||
2414 | response: | 2681 | response: |
2415 | bh_unlock_sock(parent); | 2682 | release_sock(parent); |
2416 | 2683 | ||
2417 | sendresp: | 2684 | sendresp: |
2418 | rsp.scid = cpu_to_le16(scid); | 2685 | rsp.scid = cpu_to_le16(scid); |
@@ -2428,7 +2695,7 @@ sendresp: | |||
2428 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; | 2695 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
2429 | conn->info_ident = l2cap_get_ident(conn); | 2696 | conn->info_ident = l2cap_get_ident(conn); |
2430 | 2697 | ||
2431 | mod_timer(&conn->info_timer, jiffies + | 2698 | schedule_delayed_work(&conn->info_timer, |
2432 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); | 2699 | msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); |
2433 | 2700 | ||
2434 | l2cap_send_cmd(conn, conn->info_ident, | 2701 | l2cap_send_cmd(conn, conn->info_ident, |
@@ -2494,19 +2761,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
2494 | break; | 2761 | break; |
2495 | 2762 | ||
2496 | default: | 2763 | default: |
2497 | /* don't delete l2cap channel if sk is owned by user */ | ||
2498 | if (sock_owned_by_user(sk)) { | ||
2499 | l2cap_state_change(chan, BT_DISCONN); | ||
2500 | __clear_chan_timer(chan); | ||
2501 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); | ||
2502 | break; | ||
2503 | } | ||
2504 | |||
2505 | l2cap_chan_del(chan, ECONNREFUSED); | 2764 | l2cap_chan_del(chan, ECONNREFUSED); |
2506 | break; | 2765 | break; |
2507 | } | 2766 | } |
2508 | 2767 | ||
2509 | bh_unlock_sock(sk); | 2768 | release_sock(sk); |
2510 | return 0; | 2769 | return 0; |
2511 | } | 2770 | } |
2512 | 2771 | ||
@@ -2612,8 +2871,23 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2612 | chan->num_conf_req++; | 2871 | chan->num_conf_req++; |
2613 | } | 2872 | } |
2614 | 2873 | ||
2874 | /* Got Conf Rsp PENDING from remote side and assume we sent | ||
2875 | Conf Rsp PENDING in the code above */ | ||
2876 | if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && | ||
2877 | test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { | ||
2878 | |||
2879 | /* check compatibility */ | ||
2880 | |||
2881 | clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | ||
2882 | set_bit(CONF_OUTPUT_DONE, &chan->conf_state); | ||
2883 | |||
2884 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | ||
2885 | l2cap_build_conf_rsp(chan, rsp, | ||
2886 | L2CAP_CONF_SUCCESS, 0x0000), rsp); | ||
2887 | } | ||
2888 | |||
2615 | unlock: | 2889 | unlock: |
2616 | bh_unlock_sock(sk); | 2890 | release_sock(sk); |
2617 | return 0; | 2891 | return 0; |
2618 | } | 2892 | } |
2619 | 2893 | ||
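With EFS, configuration can now stall in a PENDING state on both sides (lockstep configuration). A rough sketch of the exchange that the PENDING handling above and in l2cap_config_rsp() below aim at; the timing and the symmetric reverse direction are simplified away:

	/*
	 *   A ---- Config Req (RFC + EFS) -------------------> B
	 *   A <--- Config Rsp, result = L2CAP_CONF_PENDING ---- B
	 *            (both sides may verify EFS/AMP resources here)
	 *   A <--- Config Rsp, result = L2CAP_CONF_SUCCESS ---- B
	 */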
@@ -2641,8 +2915,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2641 | switch (result) { | 2915 | switch (result) { |
2642 | case L2CAP_CONF_SUCCESS: | 2916 | case L2CAP_CONF_SUCCESS: |
2643 | l2cap_conf_rfc_get(chan, rsp->data, len); | 2917 | l2cap_conf_rfc_get(chan, rsp->data, len); |
2918 | clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); | ||
2644 | break; | 2919 | break; |
2645 | 2920 | ||
2921 | case L2CAP_CONF_PENDING: | ||
2922 | set_bit(CONF_REM_CONF_PEND, &chan->conf_state); | ||
2923 | |||
2924 | if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { | ||
2925 | char buf[64]; | ||
2926 | |||
2927 | len = l2cap_parse_conf_rsp(chan, rsp->data, len, | ||
2928 | buf, &result); | ||
2929 | if (len < 0) { | ||
2930 | l2cap_send_disconn_req(conn, chan, ECONNRESET); | ||
2931 | goto done; | ||
2932 | } | ||
2933 | |||
2934 | /* check compatibility */ | ||
2935 | |||
2936 | clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | ||
2937 | set_bit(CONF_OUTPUT_DONE, &chan->conf_state); | ||
2938 | |||
2939 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | ||
2940 | l2cap_build_conf_rsp(chan, buf, | ||
2941 | L2CAP_CONF_SUCCESS, 0x0000), buf); | ||
2942 | } | ||
2943 | goto done; | ||
2944 | |||
2646 | case L2CAP_CONF_UNACCEPT: | 2945 | case L2CAP_CONF_UNACCEPT: |
2647 | if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { | 2946 | if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { |
2648 | char req[64]; | 2947 | char req[64]; |
@@ -2695,7 +2994,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2695 | } | 2994 | } |
2696 | 2995 | ||
2697 | done: | 2996 | done: |
2698 | bh_unlock_sock(sk); | 2997 | release_sock(sk); |
2699 | return 0; | 2998 | return 0; |
2700 | } | 2999 | } |
2701 | 3000 | ||
@@ -2724,17 +3023,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd | |||
2724 | 3023 | ||
2725 | sk->sk_shutdown = SHUTDOWN_MASK; | 3024 | sk->sk_shutdown = SHUTDOWN_MASK; |
2726 | 3025 | ||
2727 | /* don't delete l2cap channel if sk is owned by user */ | ||
2728 | if (sock_owned_by_user(sk)) { | ||
2729 | l2cap_state_change(chan, BT_DISCONN); | ||
2730 | __clear_chan_timer(chan); | ||
2731 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); | ||
2732 | bh_unlock_sock(sk); | ||
2733 | return 0; | ||
2734 | } | ||
2735 | |||
2736 | l2cap_chan_del(chan, ECONNRESET); | 3026 | l2cap_chan_del(chan, ECONNRESET); |
2737 | bh_unlock_sock(sk); | 3027 | release_sock(sk); |
2738 | 3028 | ||
2739 | chan->ops->close(chan->data); | 3029 | chan->ops->close(chan->data); |
2740 | return 0; | 3030 | return 0; |
@@ -2758,17 +3048,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd | |||
2758 | 3048 | ||
2759 | sk = chan->sk; | 3049 | sk = chan->sk; |
2760 | 3050 | ||
2761 | /* don't delete l2cap channel if sk is owned by user */ | ||
2762 | if (sock_owned_by_user(sk)) { | ||
2763 | l2cap_state_change(chan,BT_DISCONN); | ||
2764 | __clear_chan_timer(chan); | ||
2765 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); | ||
2766 | bh_unlock_sock(sk); | ||
2767 | return 0; | ||
2768 | } | ||
2769 | |||
2770 | l2cap_chan_del(chan, 0); | 3051 | l2cap_chan_del(chan, 0); |
2771 | bh_unlock_sock(sk); | 3052 | release_sock(sk); |
2772 | 3053 | ||
2773 | chan->ops->close(chan->data); | 3054 | chan->ops->close(chan->data); |
2774 | return 0; | 3055 | return 0; |
@@ -2792,15 +3073,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
2792 | if (!disable_ertm) | 3073 | if (!disable_ertm) |
2793 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | 3074 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
2794 | | L2CAP_FEAT_FCS; | 3075 | | L2CAP_FEAT_FCS; |
3076 | if (enable_hs) | ||
3077 | feat_mask |= L2CAP_FEAT_EXT_FLOW | ||
3078 | | L2CAP_FEAT_EXT_WINDOW; | ||
3079 | |||
2795 | put_unaligned_le32(feat_mask, rsp->data); | 3080 | put_unaligned_le32(feat_mask, rsp->data); |
2796 | l2cap_send_cmd(conn, cmd->ident, | 3081 | l2cap_send_cmd(conn, cmd->ident, |
2797 | L2CAP_INFO_RSP, sizeof(buf), buf); | 3082 | L2CAP_INFO_RSP, sizeof(buf), buf); |
2798 | } else if (type == L2CAP_IT_FIXED_CHAN) { | 3083 | } else if (type == L2CAP_IT_FIXED_CHAN) { |
2799 | u8 buf[12]; | 3084 | u8 buf[12]; |
2800 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; | 3085 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
3086 | |||
3087 | if (enable_hs) | ||
3088 | l2cap_fixed_chan[0] |= L2CAP_FC_A2MP; | ||
3089 | else | ||
3090 | l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; | ||
3091 | |||
2801 | rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); | 3092 | rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
2802 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); | 3093 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); |
2803 | memcpy(buf + 4, l2cap_fixed_chan, 8); | 3094 | memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); |
2804 | l2cap_send_cmd(conn, cmd->ident, | 3095 | l2cap_send_cmd(conn, cmd->ident, |
2805 | L2CAP_INFO_RSP, sizeof(buf), buf); | 3096 | L2CAP_INFO_RSP, sizeof(buf), buf); |
2806 | } else { | 3097 | } else { |
@@ -2829,7 +3120,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
2829 | conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) | 3120 | conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) |
2830 | return 0; | 3121 | return 0; |
2831 | 3122 | ||
2832 | del_timer(&conn->info_timer); | 3123 | __cancel_delayed_work(&conn->info_timer); |
2833 | 3124 | ||
2834 | if (result != L2CAP_IR_SUCCESS) { | 3125 | if (result != L2CAP_IR_SUCCESS) { |
2835 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 3126 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
@@ -2867,6 +3158,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
2867 | return 0; | 3158 | return 0; |
2868 | } | 3159 | } |
2869 | 3160 | ||
3161 | static inline int l2cap_create_channel_req(struct l2cap_conn *conn, | ||
3162 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, | ||
3163 | void *data) | ||
3164 | { | ||
3165 | struct l2cap_create_chan_req *req = data; | ||
3166 | struct l2cap_create_chan_rsp rsp; | ||
3167 | u16 psm, scid; | ||
3168 | |||
3169 | if (cmd_len != sizeof(*req)) | ||
3170 | return -EPROTO; | ||
3171 | |||
3172 | if (!enable_hs) | ||
3173 | return -EINVAL; | ||
3174 | |||
3175 | psm = le16_to_cpu(req->psm); | ||
3176 | scid = le16_to_cpu(req->scid); | ||
3177 | |||
3178 | BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); | ||
3179 | |||
3180 | /* Placeholder: Always reject */ | ||
3181 | rsp.dcid = 0; | ||
3182 | rsp.scid = cpu_to_le16(scid); | ||
3183 | rsp.result = L2CAP_CR_NO_MEM; | ||
3184 | rsp.status = L2CAP_CS_NO_INFO; | ||
3185 | |||
3186 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, | ||
3187 | sizeof(rsp), &rsp); | ||
3188 | |||
3189 | return 0; | ||
3190 | } | ||
3191 | |||
3192 | static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, | ||
3193 | struct l2cap_cmd_hdr *cmd, void *data) | ||
3194 | { | ||
3195 | BT_DBG("conn %p", conn); | ||
3196 | |||
3197 | return l2cap_connect_rsp(conn, cmd, data); | ||
3198 | } | ||
3199 | |||
3200 | static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, | ||
3201 | u16 icid, u16 result) | ||
3202 | { | ||
3203 | struct l2cap_move_chan_rsp rsp; | ||
3204 | |||
3205 | BT_DBG("icid %d, result %d", icid, result); | ||
3206 | |||
3207 | rsp.icid = cpu_to_le16(icid); | ||
3208 | rsp.result = cpu_to_le16(result); | ||
3209 | |||
3210 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); | ||
3211 | } | ||
3212 | |||
3213 | static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, | ||
3214 | struct l2cap_chan *chan, u16 icid, u16 result) | ||
3215 | { | ||
3216 | struct l2cap_move_chan_cfm cfm; | ||
3217 | u8 ident; | ||
3218 | |||
3219 | BT_DBG("icid %d, result %d", icid, result); | ||
3220 | |||
3221 | ident = l2cap_get_ident(conn); | ||
3222 | if (chan) | ||
3223 | chan->ident = ident; | ||
3224 | |||
3225 | cfm.icid = cpu_to_le16(icid); | ||
3226 | cfm.result = cpu_to_le16(result); | ||
3227 | |||
3228 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); | ||
3229 | } | ||
3230 | |||
3231 | static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, | ||
3232 | u16 icid) | ||
3233 | { | ||
3234 | struct l2cap_move_chan_cfm_rsp rsp; | ||
3235 | |||
3236 | BT_DBG("icid %d", icid); | ||
3237 | |||
3238 | rsp.icid = cpu_to_le16(icid); | ||
3239 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); | ||
3240 | } | ||
3241 | |||
3242 | static inline int l2cap_move_channel_req(struct l2cap_conn *conn, | ||
3243 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) | ||
3244 | { | ||
3245 | struct l2cap_move_chan_req *req = data; | ||
3246 | u16 icid = 0; | ||
3247 | u16 result = L2CAP_MR_NOT_ALLOWED; | ||
3248 | |||
3249 | if (cmd_len != sizeof(*req)) | ||
3250 | return -EPROTO; | ||
3251 | |||
3252 | icid = le16_to_cpu(req->icid); | ||
3253 | |||
3254 | BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); | ||
3255 | |||
3256 | if (!enable_hs) | ||
3257 | return -EINVAL; | ||
3258 | |||
3259 | /* Placeholder: Always refuse */ | ||
3260 | l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); | ||
3261 | |||
3262 | return 0; | ||
3263 | } | ||
3264 | |||
3265 | static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, | ||
3266 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) | ||
3267 | { | ||
3268 | struct l2cap_move_chan_rsp *rsp = data; | ||
3269 | u16 icid, result; | ||
3270 | |||
3271 | if (cmd_len != sizeof(*rsp)) | ||
3272 | return -EPROTO; | ||
3273 | |||
3274 | icid = le16_to_cpu(rsp->icid); | ||
3275 | result = le16_to_cpu(rsp->result); | ||
3276 | |||
3277 | BT_DBG("icid %d, result %d", icid, result); | ||
3278 | |||
3279 | /* Placeholder: Always unconfirmed */ | ||
3280 | l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); | ||
3281 | |||
3282 | return 0; | ||
3283 | } | ||
3284 | |||
3285 | static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, | ||
3286 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) | ||
3287 | { | ||
3288 | struct l2cap_move_chan_cfm *cfm = data; | ||
3289 | u16 icid, result; | ||
3290 | |||
3291 | if (cmd_len != sizeof(*cfm)) | ||
3292 | return -EPROTO; | ||
3293 | |||
3294 | icid = le16_to_cpu(cfm->icid); | ||
3295 | result = le16_to_cpu(cfm->result); | ||
3296 | |||
3297 | BT_DBG("icid %d, result %d", icid, result); | ||
3298 | |||
3299 | l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); | ||
3300 | |||
3301 | return 0; | ||
3302 | } | ||
3303 | |||
3304 | static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, | ||
3305 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) | ||
3306 | { | ||
3307 | struct l2cap_move_chan_cfm_rsp *rsp = data; | ||
3308 | u16 icid; | ||
3309 | |||
3310 | if (cmd_len != sizeof(*rsp)) | ||
3311 | return -EPROTO; | ||
3312 | |||
3313 | icid = le16_to_cpu(rsp->icid); | ||
3314 | |||
3315 | BT_DBG("icid %d", icid); | ||
3316 | |||
3317 | return 0; | ||
3318 | } | ||
3319 | |||
2870 | static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, | 3320 | static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, |
2871 | u16 to_multiplier) | 3321 | u16 to_multiplier) |
2872 | { | 3322 | { |
@@ -2979,6 +3429,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, | |||
2979 | err = l2cap_information_rsp(conn, cmd, data); | 3429 | err = l2cap_information_rsp(conn, cmd, data); |
2980 | break; | 3430 | break; |
2981 | 3431 | ||
3432 | case L2CAP_CREATE_CHAN_REQ: | ||
3433 | err = l2cap_create_channel_req(conn, cmd, cmd_len, data); | ||
3434 | break; | ||
3435 | |||
3436 | case L2CAP_CREATE_CHAN_RSP: | ||
3437 | err = l2cap_create_channel_rsp(conn, cmd, data); | ||
3438 | break; | ||
3439 | |||
3440 | case L2CAP_MOVE_CHAN_REQ: | ||
3441 | err = l2cap_move_channel_req(conn, cmd, cmd_len, data); | ||
3442 | break; | ||
3443 | |||
3444 | case L2CAP_MOVE_CHAN_RSP: | ||
3445 | err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data); | ||
3446 | break; | ||
3447 | |||
3448 | case L2CAP_MOVE_CHAN_CFM: | ||
3449 | err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); | ||
3450 | break; | ||
3451 | |||
3452 | case L2CAP_MOVE_CHAN_CFM_RSP: | ||
3453 | err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); | ||
3454 | break; | ||
3455 | |||
2982 | default: | 3456 | default: |
2983 | BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); | 3457 | BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); |
2984 | err = -EINVAL; | 3458 | err = -EINVAL; |
@@ -3057,10 +3531,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, | |||
3057 | static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) | 3531 | static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) |
3058 | { | 3532 | { |
3059 | u16 our_fcs, rcv_fcs; | 3533 | u16 our_fcs, rcv_fcs; |
3060 | int hdr_size = L2CAP_HDR_SIZE + 2; | 3534 | int hdr_size; |
3535 | |||
3536 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | ||
3537 | hdr_size = L2CAP_EXT_HDR_SIZE; | ||
3538 | else | ||
3539 | hdr_size = L2CAP_ENH_HDR_SIZE; | ||
3061 | 3540 | ||
3062 | if (chan->fcs == L2CAP_FCS_CRC16) { | 3541 | if (chan->fcs == L2CAP_FCS_CRC16) { |
3063 | skb_trim(skb, skb->len - 2); | 3542 | skb_trim(skb, skb->len - L2CAP_FCS_SIZE); |
3064 | rcv_fcs = get_unaligned_le16(skb->data + skb->len); | 3543 | rcv_fcs = get_unaligned_le16(skb->data + skb->len); |
3065 | our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); | 3544 | our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); |
3066 | 3545 | ||
@@ -3072,14 +3551,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) | |||
3072 | 3551 | ||
3073 | static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) | 3552 | static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) |
3074 | { | 3553 | { |
3075 | u16 control = 0; | 3554 | u32 control = 0; |
3076 | 3555 | ||
3077 | chan->frames_sent = 0; | 3556 | chan->frames_sent = 0; |
3078 | 3557 | ||
3079 | control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 3558 | control |= __set_reqseq(chan, chan->buffer_seq); |
3080 | 3559 | ||
3081 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 3560 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
3082 | control |= L2CAP_SUPER_RCV_NOT_READY; | 3561 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); |
3083 | l2cap_send_sframe(chan, control); | 3562 | l2cap_send_sframe(chan, control); |
3084 | set_bit(CONN_RNR_SENT, &chan->conn_state); | 3563 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
3085 | } | 3564 | } |
@@ -3091,12 +3570,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) | |||
3091 | 3570 | ||
3092 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && | 3571 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && |
3093 | chan->frames_sent == 0) { | 3572 | chan->frames_sent == 0) { |
3094 | control |= L2CAP_SUPER_RCV_READY; | 3573 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); |
3095 | l2cap_send_sframe(chan, control); | 3574 | l2cap_send_sframe(chan, control); |
3096 | } | 3575 | } |
3097 | } | 3576 | } |
3098 | 3577 | ||
3099 | static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar) | 3578 | static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) |
3100 | { | 3579 | { |
3101 | struct sk_buff *next_skb; | 3580 | struct sk_buff *next_skb; |
3102 | int tx_seq_offset, next_tx_seq_offset; | 3581 | int tx_seq_offset, next_tx_seq_offset; |
@@ -3105,23 +3584,15 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, | |||
3105 | bt_cb(skb)->sar = sar; | 3584 | bt_cb(skb)->sar = sar; |
3106 | 3585 | ||
3107 | next_skb = skb_peek(&chan->srej_q); | 3586 | next_skb = skb_peek(&chan->srej_q); |
3108 | if (!next_skb) { | ||
3109 | __skb_queue_tail(&chan->srej_q, skb); | ||
3110 | return 0; | ||
3111 | } | ||
3112 | 3587 | ||
3113 | tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; | 3588 | tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); |
3114 | if (tx_seq_offset < 0) | ||
3115 | tx_seq_offset += 64; | ||
3116 | 3589 | ||
3117 | do { | 3590 | while (next_skb) { |
3118 | if (bt_cb(next_skb)->tx_seq == tx_seq) | 3591 | if (bt_cb(next_skb)->tx_seq == tx_seq) |
3119 | return -EINVAL; | 3592 | return -EINVAL; |
3120 | 3593 | ||
3121 | next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - | 3594 | next_tx_seq_offset = __seq_offset(chan, |
3122 | chan->buffer_seq) % 64; | 3595 | bt_cb(next_skb)->tx_seq, chan->buffer_seq); |
3123 | if (next_tx_seq_offset < 0) | ||
3124 | next_tx_seq_offset += 64; | ||
3125 | 3596 | ||
3126 | if (next_tx_seq_offset > tx_seq_offset) { | 3597 | if (next_tx_seq_offset > tx_seq_offset) { |
3127 | __skb_queue_before(&chan->srej_q, next_skb, skb); | 3598 | __skb_queue_before(&chan->srej_q, next_skb, skb); |
@@ -3129,9 +3600,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, | |||
3129 | } | 3600 | } |
3130 | 3601 | ||
3131 | if (skb_queue_is_last(&chan->srej_q, next_skb)) | 3602 | if (skb_queue_is_last(&chan->srej_q, next_skb)) |
3132 | break; | 3603 | next_skb = NULL; |
3133 | 3604 | else | |
3134 | } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb))); | 3605 | next_skb = skb_queue_next(&chan->srej_q, next_skb); |
3606 | } | ||
3135 | 3607 | ||
3136 | __skb_queue_tail(&chan->srej_q, skb); | 3608 | __skb_queue_tail(&chan->srej_q, skb); |
3137 | 3609 | ||
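The open-coded "% 64" arithmetic disappears in favour of sequence helpers so the same code paths work with 14-bit extended sequence numbers. A minimal sketch of such helpers, using hypothetical names; they are assumed to wrap within the negotiated sequence space and are not necessarily the kernel's exact implementation:

	/* illustrative only; the real helpers live in <net/bluetooth/l2cap.h> */
	static inline u16 ex_next_seq(struct l2cap_chan *chan, u16 seq)
	{
		/* 64 states with the enhanced control field, 16384 with the extended one */
		return (seq + 1) % (chan->tx_win_max + 1);
	}

	static inline u16 ex_seq_offset(struct l2cap_chan *chan, u16 seq1, u16 seq2)
	{
		int mod = chan->tx_win_max + 1;

		return ((int)seq1 - (int)seq2 + mod) % mod;	/* always >= 0 */
	}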
@@ -3157,24 +3629,24 @@ static void append_skb_frag(struct sk_buff *skb, | |||
3157 | skb->truesize += new_frag->truesize; | 3629 | skb->truesize += new_frag->truesize; |
3158 | } | 3630 | } |
3159 | 3631 | ||
3160 | static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) | 3632 | static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) |
3161 | { | 3633 | { |
3162 | int err = -EINVAL; | 3634 | int err = -EINVAL; |
3163 | 3635 | ||
3164 | switch (control & L2CAP_CTRL_SAR) { | 3636 | switch (__get_ctrl_sar(chan, control)) { |
3165 | case L2CAP_SDU_UNSEGMENTED: | 3637 | case L2CAP_SAR_UNSEGMENTED: |
3166 | if (chan->sdu) | 3638 | if (chan->sdu) |
3167 | break; | 3639 | break; |
3168 | 3640 | ||
3169 | err = chan->ops->recv(chan->data, skb); | 3641 | err = chan->ops->recv(chan->data, skb); |
3170 | break; | 3642 | break; |
3171 | 3643 | ||
3172 | case L2CAP_SDU_START: | 3644 | case L2CAP_SAR_START: |
3173 | if (chan->sdu) | 3645 | if (chan->sdu) |
3174 | break; | 3646 | break; |
3175 | 3647 | ||
3176 | chan->sdu_len = get_unaligned_le16(skb->data); | 3648 | chan->sdu_len = get_unaligned_le16(skb->data); |
3177 | skb_pull(skb, 2); | 3649 | skb_pull(skb, L2CAP_SDULEN_SIZE); |
3178 | 3650 | ||
3179 | if (chan->sdu_len > chan->imtu) { | 3651 | if (chan->sdu_len > chan->imtu) { |
3180 | err = -EMSGSIZE; | 3652 | err = -EMSGSIZE; |
@@ -3191,7 +3663,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 | |||
3191 | err = 0; | 3663 | err = 0; |
3192 | break; | 3664 | break; |
3193 | 3665 | ||
3194 | case L2CAP_SDU_CONTINUE: | 3666 | case L2CAP_SAR_CONTINUE: |
3195 | if (!chan->sdu) | 3667 | if (!chan->sdu) |
3196 | break; | 3668 | break; |
3197 | 3669 | ||
@@ -3205,7 +3677,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 | |||
3205 | err = 0; | 3677 | err = 0; |
3206 | break; | 3678 | break; |
3207 | 3679 | ||
3208 | case L2CAP_SDU_END: | 3680 | case L2CAP_SAR_END: |
3209 | if (!chan->sdu) | 3681 | if (!chan->sdu) |
3210 | break; | 3682 | break; |
3211 | 3683 | ||
@@ -3240,14 +3712,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 | |||
3240 | 3712 | ||
3241 | static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) | 3713 | static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) |
3242 | { | 3714 | { |
3243 | u16 control; | 3715 | u32 control; |
3244 | 3716 | ||
3245 | BT_DBG("chan %p, Enter local busy", chan); | 3717 | BT_DBG("chan %p, Enter local busy", chan); |
3246 | 3718 | ||
3247 | set_bit(CONN_LOCAL_BUSY, &chan->conn_state); | 3719 | set_bit(CONN_LOCAL_BUSY, &chan->conn_state); |
3248 | 3720 | ||
3249 | control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 3721 | control = __set_reqseq(chan, chan->buffer_seq); |
3250 | control |= L2CAP_SUPER_RCV_NOT_READY; | 3722 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); |
3251 | l2cap_send_sframe(chan, control); | 3723 | l2cap_send_sframe(chan, control); |
3252 | 3724 | ||
3253 | set_bit(CONN_RNR_SENT, &chan->conn_state); | 3725 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
@@ -3257,13 +3729,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) | |||
3257 | 3729 | ||
3258 | static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) | 3730 | static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) |
3259 | { | 3731 | { |
3260 | u16 control; | 3732 | u32 control; |
3261 | 3733 | ||
3262 | if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) | 3734 | if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) |
3263 | goto done; | 3735 | goto done; |
3264 | 3736 | ||
3265 | control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 3737 | control = __set_reqseq(chan, chan->buffer_seq); |
3266 | control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; | 3738 | control |= __set_ctrl_poll(chan); |
3739 | control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); | ||
3267 | l2cap_send_sframe(chan, control); | 3740 | l2cap_send_sframe(chan, control); |
3268 | chan->retry_count = 1; | 3741 | chan->retry_count = 1; |
3269 | 3742 | ||
@@ -3289,10 +3762,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy) | |||
3289 | } | 3762 | } |
3290 | } | 3763 | } |
3291 | 3764 | ||
3292 | static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) | 3765 | static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) |
3293 | { | 3766 | { |
3294 | struct sk_buff *skb; | 3767 | struct sk_buff *skb; |
3295 | u16 control; | 3768 | u32 control; |
3296 | 3769 | ||
3297 | while ((skb = skb_peek(&chan->srej_q)) && | 3770 | while ((skb = skb_peek(&chan->srej_q)) && |
3298 | !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | 3771 | !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
@@ -3302,7 +3775,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) | |||
3302 | break; | 3775 | break; |
3303 | 3776 | ||
3304 | skb = skb_dequeue(&chan->srej_q); | 3777 | skb = skb_dequeue(&chan->srej_q); |
3305 | control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; | 3778 | control = __set_ctrl_sar(chan, bt_cb(skb)->sar); |
3306 | err = l2cap_reassemble_sdu(chan, skb, control); | 3779 | err = l2cap_reassemble_sdu(chan, skb, control); |
3307 | 3780 | ||
3308 | if (err < 0) { | 3781 | if (err < 0) { |
@@ -3310,16 +3783,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) | |||
3310 | break; | 3783 | break; |
3311 | } | 3784 | } |
3312 | 3785 | ||
3313 | chan->buffer_seq_srej = | 3786 | chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); |
3314 | (chan->buffer_seq_srej + 1) % 64; | 3787 | tx_seq = __next_seq(chan, tx_seq); |
3315 | tx_seq = (tx_seq + 1) % 64; | ||
3316 | } | 3788 | } |
3317 | } | 3789 | } |
3318 | 3790 | ||
3319 | static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) | 3791 | static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) |
3320 | { | 3792 | { |
3321 | struct srej_list *l, *tmp; | 3793 | struct srej_list *l, *tmp; |
3322 | u16 control; | 3794 | u32 control; |
3323 | 3795 | ||
3324 | list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { | 3796 | list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { |
3325 | if (l->tx_seq == tx_seq) { | 3797 | if (l->tx_seq == tx_seq) { |
@@ -3327,45 +3799,53 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) | |||
3327 | kfree(l); | 3799 | kfree(l); |
3328 | return; | 3800 | return; |
3329 | } | 3801 | } |
3330 | control = L2CAP_SUPER_SELECT_REJECT; | 3802 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); |
3331 | control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 3803 | control |= __set_reqseq(chan, l->tx_seq); |
3332 | l2cap_send_sframe(chan, control); | 3804 | l2cap_send_sframe(chan, control); |
3333 | list_del(&l->list); | 3805 | list_del(&l->list); |
3334 | list_add_tail(&l->list, &chan->srej_l); | 3806 | list_add_tail(&l->list, &chan->srej_l); |
3335 | } | 3807 | } |
3336 | } | 3808 | } |
3337 | 3809 | ||
3338 | static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq) | 3810 | static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) |
3339 | { | 3811 | { |
3340 | struct srej_list *new; | 3812 | struct srej_list *new; |
3341 | u16 control; | 3813 | u32 control; |
3342 | 3814 | ||
3343 | while (tx_seq != chan->expected_tx_seq) { | 3815 | while (tx_seq != chan->expected_tx_seq) { |
3344 | control = L2CAP_SUPER_SELECT_REJECT; | 3816 | control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); |
3345 | control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; | 3817 | control |= __set_reqseq(chan, chan->expected_tx_seq); |
3346 | l2cap_send_sframe(chan, control); | 3818 | l2cap_send_sframe(chan, control); |
3347 | 3819 | ||
3348 | new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); | 3820 | new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); |
3821 | if (!new) | ||
3822 | return -ENOMEM; | ||
3823 | |||
3349 | new->tx_seq = chan->expected_tx_seq; | 3824 | new->tx_seq = chan->expected_tx_seq; |
3350 | chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; | 3825 | |
3826 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); | ||
3827 | |||
3351 | list_add_tail(&new->list, &chan->srej_l); | 3828 | list_add_tail(&new->list, &chan->srej_l); |
3352 | } | 3829 | } |
3353 | chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; | 3830 | |
3831 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); | ||
3832 | |||
3833 | return 0; | ||
3354 | } | 3834 | } |
3355 | 3835 | ||
3356 | static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) | 3836 | static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) |
3357 | { | 3837 | { |
3358 | u8 tx_seq = __get_txseq(rx_control); | 3838 | u16 tx_seq = __get_txseq(chan, rx_control); |
3359 | u8 req_seq = __get_reqseq(rx_control); | 3839 | u16 req_seq = __get_reqseq(chan, rx_control); |
3360 | u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; | 3840 | u8 sar = __get_ctrl_sar(chan, rx_control); |
3361 | int tx_seq_offset, expected_tx_seq_offset; | 3841 | int tx_seq_offset, expected_tx_seq_offset; |
3362 | int num_to_ack = (chan->tx_win/6) + 1; | 3842 | int num_to_ack = (chan->tx_win/6) + 1; |
3363 | int err = 0; | 3843 | int err = 0; |
3364 | 3844 | ||
3365 | BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len, | 3845 | BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, |
3366 | tx_seq, rx_control); | 3846 | tx_seq, rx_control); |
3367 | 3847 | ||
3368 | if (L2CAP_CTRL_FINAL & rx_control && | 3848 | if (__is_ctrl_final(chan, rx_control) && |
3369 | test_bit(CONN_WAIT_F, &chan->conn_state)) { | 3849 | test_bit(CONN_WAIT_F, &chan->conn_state)) { |
3370 | __clear_monitor_timer(chan); | 3850 | __clear_monitor_timer(chan); |
3371 | if (chan->unacked_frames > 0) | 3851 | if (chan->unacked_frames > 0) |
@@ -3376,9 +3856,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont | |||
3376 | chan->expected_ack_seq = req_seq; | 3856 | chan->expected_ack_seq = req_seq; |
3377 | l2cap_drop_acked_frames(chan); | 3857 | l2cap_drop_acked_frames(chan); |
3378 | 3858 | ||
3379 | tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; | 3859 | tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); |
3380 | if (tx_seq_offset < 0) | ||
3381 | tx_seq_offset += 64; | ||
3382 | 3860 | ||
3383 | /* invalid tx_seq */ | 3861 | /* invalid tx_seq */ |
3384 | if (tx_seq_offset >= chan->tx_win) { | 3862 | if (tx_seq_offset >= chan->tx_win) { |
@@ -3423,13 +3901,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont | |||
3423 | return 0; | 3901 | return 0; |
3424 | } | 3902 | } |
3425 | } | 3903 | } |
3426 | l2cap_send_srejframe(chan, tx_seq); | 3904 | |
3905 | err = l2cap_send_srejframe(chan, tx_seq); | ||
3906 | if (err < 0) { | ||
3907 | l2cap_send_disconn_req(chan->conn, chan, -err); | ||
3908 | return err; | ||
3909 | } | ||
3427 | } | 3910 | } |
3428 | } else { | 3911 | } else { |
3429 | expected_tx_seq_offset = | 3912 | expected_tx_seq_offset = __seq_offset(chan, |
3430 | (chan->expected_tx_seq - chan->buffer_seq) % 64; | 3913 | chan->expected_tx_seq, chan->buffer_seq); |
3431 | if (expected_tx_seq_offset < 0) | ||
3432 | expected_tx_seq_offset += 64; | ||
3433 | 3914 | ||
3434 | /* duplicated tx_seq */ | 3915 | /* duplicated tx_seq */ |
3435 | if (tx_seq_offset < expected_tx_seq_offset) | 3916 | if (tx_seq_offset < expected_tx_seq_offset) |
@@ -3447,14 +3928,18 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont | |||
3447 | 3928 | ||
3448 | set_bit(CONN_SEND_PBIT, &chan->conn_state); | 3929 | set_bit(CONN_SEND_PBIT, &chan->conn_state); |
3449 | 3930 | ||
3450 | l2cap_send_srejframe(chan, tx_seq); | 3931 | err = l2cap_send_srejframe(chan, tx_seq); |
3932 | if (err < 0) { | ||
3933 | l2cap_send_disconn_req(chan->conn, chan, -err); | ||
3934 | return err; | ||
3935 | } | ||
3451 | 3936 | ||
3452 | __clear_ack_timer(chan); | 3937 | __clear_ack_timer(chan); |
3453 | } | 3938 | } |
3454 | return 0; | 3939 | return 0; |
3455 | 3940 | ||
3456 | expected: | 3941 | expected: |
3457 | chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; | 3942 | chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); |
3458 | 3943 | ||
3459 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 3944 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { |
3460 | bt_cb(skb)->tx_seq = tx_seq; | 3945 | bt_cb(skb)->tx_seq = tx_seq; |
@@ -3464,22 +3949,24 @@ expected: | |||
3464 | } | 3949 | } |
3465 | 3950 | ||
3466 | err = l2cap_reassemble_sdu(chan, skb, rx_control); | 3951 | err = l2cap_reassemble_sdu(chan, skb, rx_control); |
3467 | chan->buffer_seq = (chan->buffer_seq + 1) % 64; | 3952 | chan->buffer_seq = __next_seq(chan, chan->buffer_seq); |
3953 | |||
3468 | if (err < 0) { | 3954 | if (err < 0) { |
3469 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 3955 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
3470 | return err; | 3956 | return err; |
3471 | } | 3957 | } |
3472 | 3958 | ||
3473 | if (rx_control & L2CAP_CTRL_FINAL) { | 3959 | if (__is_ctrl_final(chan, rx_control)) { |
3474 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | 3960 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) |
3475 | l2cap_retransmit_frames(chan); | 3961 | l2cap_retransmit_frames(chan); |
3476 | } | 3962 | } |
3477 | 3963 | ||
3478 | __set_ack_timer(chan); | ||
3479 | 3964 | ||
3480 | chan->num_acked = (chan->num_acked + 1) % num_to_ack; | 3965 | chan->num_acked = (chan->num_acked + 1) % num_to_ack; |
3481 | if (chan->num_acked == num_to_ack - 1) | 3966 | if (chan->num_acked == num_to_ack - 1) |
3482 | l2cap_send_ack(chan); | 3967 | l2cap_send_ack(chan); |
3968 | else | ||
3969 | __set_ack_timer(chan); | ||
3483 | 3970 | ||
3484 | return 0; | 3971 | return 0; |
3485 | 3972 | ||
@@ -3488,15 +3975,15 @@ drop: | |||
3488 | return 0; | 3975 | return 0; |
3489 | } | 3976 | } |
3490 | 3977 | ||
3491 | static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control) | 3978 | static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) |
3492 | { | 3979 | { |
3493 | BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control), | 3980 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, |
3494 | rx_control); | 3981 | __get_reqseq(chan, rx_control), rx_control); |
3495 | 3982 | ||
3496 | chan->expected_ack_seq = __get_reqseq(rx_control); | 3983 | chan->expected_ack_seq = __get_reqseq(chan, rx_control); |
3497 | l2cap_drop_acked_frames(chan); | 3984 | l2cap_drop_acked_frames(chan); |
3498 | 3985 | ||
3499 | if (rx_control & L2CAP_CTRL_POLL) { | 3986 | if (__is_ctrl_poll(chan, rx_control)) { |
3500 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | 3987 | set_bit(CONN_SEND_FBIT, &chan->conn_state); |
3501 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 3988 | if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { |
3502 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && | 3989 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && |
@@ -3509,7 +3996,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co | |||
3509 | l2cap_send_i_or_rr_or_rnr(chan); | 3996 | l2cap_send_i_or_rr_or_rnr(chan); |
3510 | } | 3997 | } |
3511 | 3998 | ||
3512 | } else if (rx_control & L2CAP_CTRL_FINAL) { | 3999 | } else if (__is_ctrl_final(chan, rx_control)) { |
3513 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4000 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
3514 | 4001 | ||
3515 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | 4002 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) |
@@ -3528,18 +4015,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co | |||
3528 | } | 4015 | } |
3529 | } | 4016 | } |
3530 | 4017 | ||
3531 | static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control) | 4018 | static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) |
3532 | { | 4019 | { |
3533 | u8 tx_seq = __get_reqseq(rx_control); | 4020 | u16 tx_seq = __get_reqseq(chan, rx_control); |
3534 | 4021 | ||
3535 | BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); | 4022 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); |
3536 | 4023 | ||
3537 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4024 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
3538 | 4025 | ||
3539 | chan->expected_ack_seq = tx_seq; | 4026 | chan->expected_ack_seq = tx_seq; |
3540 | l2cap_drop_acked_frames(chan); | 4027 | l2cap_drop_acked_frames(chan); |
3541 | 4028 | ||
3542 | if (rx_control & L2CAP_CTRL_FINAL) { | 4029 | if (__is_ctrl_final(chan, rx_control)) { |
3543 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) | 4030 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) |
3544 | l2cap_retransmit_frames(chan); | 4031 | l2cap_retransmit_frames(chan); |
3545 | } else { | 4032 | } else { |
@@ -3549,15 +4036,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c | |||
3549 | set_bit(CONN_REJ_ACT, &chan->conn_state); | 4036 | set_bit(CONN_REJ_ACT, &chan->conn_state); |
3550 | } | 4037 | } |
3551 | } | 4038 | } |
3552 | static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) | 4039 | static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) |
3553 | { | 4040 | { |
3554 | u8 tx_seq = __get_reqseq(rx_control); | 4041 | u16 tx_seq = __get_reqseq(chan, rx_control); |
3555 | 4042 | ||
3556 | BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); | 4043 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); |
3557 | 4044 | ||
3558 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4045 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
3559 | 4046 | ||
3560 | if (rx_control & L2CAP_CTRL_POLL) { | 4047 | if (__is_ctrl_poll(chan, rx_control)) { |
3561 | chan->expected_ack_seq = tx_seq; | 4048 | chan->expected_ack_seq = tx_seq; |
3562 | l2cap_drop_acked_frames(chan); | 4049 | l2cap_drop_acked_frames(chan); |
3563 | 4050 | ||
@@ -3570,7 +4057,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_ | |||
3570 | chan->srej_save_reqseq = tx_seq; | 4057 | chan->srej_save_reqseq = tx_seq; |
3571 | set_bit(CONN_SREJ_ACT, &chan->conn_state); | 4058 | set_bit(CONN_SREJ_ACT, &chan->conn_state); |
3572 | } | 4059 | } |
3573 | } else if (rx_control & L2CAP_CTRL_FINAL) { | 4060 | } else if (__is_ctrl_final(chan, rx_control)) { |
3574 | if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && | 4061 | if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && |
3575 | chan->srej_save_reqseq == tx_seq) | 4062 | chan->srej_save_reqseq == tx_seq) |
3576 | clear_bit(CONN_SREJ_ACT, &chan->conn_state); | 4063 | clear_bit(CONN_SREJ_ACT, &chan->conn_state); |
@@ -3585,37 +4072,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_ | |||
3585 | } | 4072 | } |
3586 | } | 4073 | } |
3587 | 4074 | ||
3588 | static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control) | 4075 | static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) |
3589 | { | 4076 | { |
3590 | u8 tx_seq = __get_reqseq(rx_control); | 4077 | u16 tx_seq = __get_reqseq(chan, rx_control); |
3591 | 4078 | ||
3592 | BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); | 4079 | BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); |
3593 | 4080 | ||
3594 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 4081 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
3595 | chan->expected_ack_seq = tx_seq; | 4082 | chan->expected_ack_seq = tx_seq; |
3596 | l2cap_drop_acked_frames(chan); | 4083 | l2cap_drop_acked_frames(chan); |
3597 | 4084 | ||
3598 | if (rx_control & L2CAP_CTRL_POLL) | 4085 | if (__is_ctrl_poll(chan, rx_control)) |
3599 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | 4086 | set_bit(CONN_SEND_FBIT, &chan->conn_state); |
3600 | 4087 | ||
3601 | if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { | 4088 | if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { |
3602 | __clear_retrans_timer(chan); | 4089 | __clear_retrans_timer(chan); |
3603 | if (rx_control & L2CAP_CTRL_POLL) | 4090 | if (__is_ctrl_poll(chan, rx_control)) |
3604 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); | 4091 | l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); |
3605 | return; | 4092 | return; |
3606 | } | 4093 | } |
3607 | 4094 | ||
3608 | if (rx_control & L2CAP_CTRL_POLL) | 4095 | if (__is_ctrl_poll(chan, rx_control)) { |
3609 | l2cap_send_srejtail(chan); | 4096 | l2cap_send_srejtail(chan); |
3610 | else | 4097 | } else { |
3611 | l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY); | 4098 | rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); |
4099 | l2cap_send_sframe(chan, rx_control); | ||
4100 | } | ||
3612 | } | 4101 | } |
3613 | 4102 | ||
3614 | static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) | 4103 | static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) |
3615 | { | 4104 | { |
3616 | BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); | 4105 | BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); |
3617 | 4106 | ||
3618 | if (L2CAP_CTRL_FINAL & rx_control && | 4107 | if (__is_ctrl_final(chan, rx_control) && |
3619 | test_bit(CONN_WAIT_F, &chan->conn_state)) { | 4108 | test_bit(CONN_WAIT_F, &chan->conn_state)) { |
3620 | __clear_monitor_timer(chan); | 4109 | __clear_monitor_timer(chan); |
3621 | if (chan->unacked_frames > 0) | 4110 | if (chan->unacked_frames > 0) |
@@ -3623,20 +4112,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont | |||
3623 | clear_bit(CONN_WAIT_F, &chan->conn_state); | 4112 | clear_bit(CONN_WAIT_F, &chan->conn_state); |
3624 | } | 4113 | } |
3625 | 4114 | ||
3626 | switch (rx_control & L2CAP_CTRL_SUPERVISE) { | 4115 | switch (__get_ctrl_super(chan, rx_control)) { |
3627 | case L2CAP_SUPER_RCV_READY: | 4116 | case L2CAP_SUPER_RR: |
3628 | l2cap_data_channel_rrframe(chan, rx_control); | 4117 | l2cap_data_channel_rrframe(chan, rx_control); |
3629 | break; | 4118 | break; |
3630 | 4119 | ||
3631 | case L2CAP_SUPER_REJECT: | 4120 | case L2CAP_SUPER_REJ: |
3632 | l2cap_data_channel_rejframe(chan, rx_control); | 4121 | l2cap_data_channel_rejframe(chan, rx_control); |
3633 | break; | 4122 | break; |
3634 | 4123 | ||
3635 | case L2CAP_SUPER_SELECT_REJECT: | 4124 | case L2CAP_SUPER_SREJ: |
3636 | l2cap_data_channel_srejframe(chan, rx_control); | 4125 | l2cap_data_channel_srejframe(chan, rx_control); |
3637 | break; | 4126 | break; |
3638 | 4127 | ||
3639 | case L2CAP_SUPER_RCV_NOT_READY: | 4128 | case L2CAP_SUPER_RNR: |
3640 | l2cap_data_channel_rnrframe(chan, rx_control); | 4129 | l2cap_data_channel_rnrframe(chan, rx_control); |
3641 | break; | 4130 | break; |
3642 | } | 4131 | } |
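
The switch above no longer masks the control word with L2CAP_CTRL_SUPERVISE and compares against the pre-shifted L2CAP_SUPER_RCV_* values; __get_ctrl_super() hides whether the channel uses the classic 16-bit control field or the 32-bit extended control field, and the supervisory codes become plain RR/REJ/SREJ/RNR numbers. A minimal user-space sketch of the same dispatch, using only the classic 16-bit layout (frame type in bit 0, S function in bits 2-3), is shown here as an illustration of the idea rather than the kernel helpers themselves:

    /* Sketch: dispatch on the supervisory function of an ERTM S-frame,
     * modelling what __get_ctrl_super() abstracts in the patched code.
     * Only the classic 16-bit control field is handled; the real helpers
     * also parse the 32-bit extended control field. */
    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_FRAME_TYPE   0x0001   /* bit 0: 1 = S-frame             */
    #define CTRL_SUPERVISE    0x000c   /* bits 2-3: supervisory function */
    #define CTRL_SUPER_SHIFT  2

    enum { SUPER_RR = 0, SUPER_REJ = 1, SUPER_RNR = 2, SUPER_SREJ = 3 };

    static unsigned int get_ctrl_super(uint16_t ctrl)
    {
        return (ctrl & CTRL_SUPERVISE) >> CTRL_SUPER_SHIFT;
    }

    int main(void)
    {
        uint16_t ctrl = 0x0005;        /* S-frame with S = 0b01 (REJ) */

        if (!(ctrl & CTRL_FRAME_TYPE))
            return 0;                  /* I-frame: not handled here   */

        switch (get_ctrl_super(ctrl)) {
        case SUPER_RR:   printf("RR\n");   break;
        case SUPER_REJ:  printf("REJ\n");  break;
        case SUPER_RNR:  printf("RNR\n");  break;
        case SUPER_SREJ: printf("SREJ\n"); break;
        }
        return 0;
    }
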
@@ -3648,12 +4137,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont | |||
3648 | static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) | 4137 | static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) |
3649 | { | 4138 | { |
3650 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 4139 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
3651 | u16 control; | 4140 | u32 control; |
3652 | u8 req_seq; | 4141 | u16 req_seq; |
3653 | int len, next_tx_seq_offset, req_seq_offset; | 4142 | int len, next_tx_seq_offset, req_seq_offset; |
3654 | 4143 | ||
3655 | control = get_unaligned_le16(skb->data); | 4144 | control = __get_control(chan, skb->data); |
3656 | skb_pull(skb, 2); | 4145 | skb_pull(skb, __ctrl_size(chan)); |
3657 | len = skb->len; | 4146 | len = skb->len; |
3658 | 4147 | ||
3659 | /* | 4148 | /* |
@@ -3664,26 +4153,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) | |||
3664 | if (l2cap_check_fcs(chan, skb)) | 4153 | if (l2cap_check_fcs(chan, skb)) |
3665 | goto drop; | 4154 | goto drop; |
3666 | 4155 | ||
3667 | if (__is_sar_start(control) && __is_iframe(control)) | 4156 | if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) |
3668 | len -= 2; | 4157 | len -= L2CAP_SDULEN_SIZE; |
3669 | 4158 | ||
3670 | if (chan->fcs == L2CAP_FCS_CRC16) | 4159 | if (chan->fcs == L2CAP_FCS_CRC16) |
3671 | len -= 2; | 4160 | len -= L2CAP_FCS_SIZE; |
3672 | 4161 | ||
3673 | if (len > chan->mps) { | 4162 | if (len > chan->mps) { |
3674 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4163 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
3675 | goto drop; | 4164 | goto drop; |
3676 | } | 4165 | } |
3677 | 4166 | ||
3678 | req_seq = __get_reqseq(control); | 4167 | req_seq = __get_reqseq(chan, control); |
3679 | req_seq_offset = (req_seq - chan->expected_ack_seq) % 64; | ||
3680 | if (req_seq_offset < 0) | ||
3681 | req_seq_offset += 64; | ||
3682 | 4168 | ||
3683 | next_tx_seq_offset = | 4169 | req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); |
3684 | (chan->next_tx_seq - chan->expected_ack_seq) % 64; | 4170 | |
3685 | if (next_tx_seq_offset < 0) | 4171 | next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, |
3686 | next_tx_seq_offset += 64; | 4172 | chan->expected_ack_seq); |
3687 | 4173 | ||
3688 | /* check for invalid req-seq */ | 4174 | /* check for invalid req-seq */ |
3689 | if (req_seq_offset > next_tx_seq_offset) { | 4175 | if (req_seq_offset > next_tx_seq_offset) { |
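
The removed lines computed sequence distances with a hard-coded modulo 64 plus a fixup for negative values, which only works for the classic 6-bit sequence numbers. __seq_offset() takes the sequence space from the channel, so the same receive code also covers the larger numbering used with the extended control field. A small user-space sketch of the arithmetic, with the classic value 64 hard-coded only for the example:

    /* Sketch: distance from b to a, modulo the sequence space, as the
     * new __seq_offset() helper replaces the open-coded "% 64" logic. */
    #include <stdint.h>
    #include <assert.h>

    static uint16_t seq_offset(uint16_t seq_space, uint16_t a, uint16_t b)
    {
        /* a and b are assumed to be already reduced into [0, seq_space) */
        return (uint16_t)((a + seq_space - b) % seq_space);
    }

    int main(void)
    {
        /* req_seq 2 with expected_ack_seq 61: five frames acknowledged
         * across the wrap-around point. */
        assert(seq_offset(64, 2, 61) == 5);
        assert(seq_offset(64, 61, 61) == 0);
        return 0;
    }
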
@@ -3691,7 +4177,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) | |||
3691 | goto drop; | 4177 | goto drop; |
3692 | } | 4178 | } |
3693 | 4179 | ||
3694 | if (__is_iframe(control)) { | 4180 | if (!__is_sframe(chan, control)) { |
3695 | if (len < 0) { | 4181 | if (len < 0) { |
3696 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4182 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
3697 | goto drop; | 4183 | goto drop; |
@@ -3719,8 +4205,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk | |||
3719 | { | 4205 | { |
3720 | struct l2cap_chan *chan; | 4206 | struct l2cap_chan *chan; |
3721 | struct sock *sk = NULL; | 4207 | struct sock *sk = NULL; |
3722 | u16 control; | 4208 | u32 control; |
3723 | u8 tx_seq; | 4209 | u16 tx_seq; |
3724 | int len; | 4210 | int len; |
3725 | 4211 | ||
3726 | chan = l2cap_get_chan_by_scid(conn, cid); | 4212 | chan = l2cap_get_chan_by_scid(conn, cid); |
@@ -3751,33 +4237,28 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk | |||
3751 | break; | 4237 | break; |
3752 | 4238 | ||
3753 | case L2CAP_MODE_ERTM: | 4239 | case L2CAP_MODE_ERTM: |
3754 | if (!sock_owned_by_user(sk)) { | 4240 | l2cap_ertm_data_rcv(sk, skb); |
3755 | l2cap_ertm_data_rcv(sk, skb); | ||
3756 | } else { | ||
3757 | if (sk_add_backlog(sk, skb)) | ||
3758 | goto drop; | ||
3759 | } | ||
3760 | 4241 | ||
3761 | goto done; | 4242 | goto done; |
3762 | 4243 | ||
3763 | case L2CAP_MODE_STREAMING: | 4244 | case L2CAP_MODE_STREAMING: |
3764 | control = get_unaligned_le16(skb->data); | 4245 | control = __get_control(chan, skb->data); |
3765 | skb_pull(skb, 2); | 4246 | skb_pull(skb, __ctrl_size(chan)); |
3766 | len = skb->len; | 4247 | len = skb->len; |
3767 | 4248 | ||
3768 | if (l2cap_check_fcs(chan, skb)) | 4249 | if (l2cap_check_fcs(chan, skb)) |
3769 | goto drop; | 4250 | goto drop; |
3770 | 4251 | ||
3771 | if (__is_sar_start(control)) | 4252 | if (__is_sar_start(chan, control)) |
3772 | len -= 2; | 4253 | len -= L2CAP_SDULEN_SIZE; |
3773 | 4254 | ||
3774 | if (chan->fcs == L2CAP_FCS_CRC16) | 4255 | if (chan->fcs == L2CAP_FCS_CRC16) |
3775 | len -= 2; | 4256 | len -= L2CAP_FCS_SIZE; |
3776 | 4257 | ||
3777 | if (len > chan->mps || len < 0 || __is_sframe(control)) | 4258 | if (len > chan->mps || len < 0 || __is_sframe(chan, control)) |
3778 | goto drop; | 4259 | goto drop; |
3779 | 4260 | ||
3780 | tx_seq = __get_txseq(control); | 4261 | tx_seq = __get_txseq(chan, control); |
3781 | 4262 | ||
3782 | if (chan->expected_tx_seq != tx_seq) { | 4263 | if (chan->expected_tx_seq != tx_seq) { |
3783 | /* Frame(s) missing - must discard partial SDU */ | 4264 | /* Frame(s) missing - must discard partial SDU */ |
@@ -3789,7 +4270,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk | |||
3789 | /* TODO: Notify userland of missing data */ | 4270 | /* TODO: Notify userland of missing data */ |
3790 | } | 4271 | } |
3791 | 4272 | ||
3792 | chan->expected_tx_seq = (tx_seq + 1) % 64; | 4273 | chan->expected_tx_seq = __next_seq(chan, tx_seq); |
3793 | 4274 | ||
3794 | if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) | 4275 | if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) |
3795 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 4276 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
@@ -3806,7 +4287,7 @@ drop: | |||
3806 | 4287 | ||
3807 | done: | 4288 | done: |
3808 | if (sk) | 4289 | if (sk) |
3809 | bh_unlock_sock(sk); | 4290 | release_sock(sk); |
3810 | 4291 | ||
3811 | return 0; | 4292 | return 0; |
3812 | } | 4293 | } |
@@ -3822,7 +4303,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str | |||
3822 | 4303 | ||
3823 | sk = chan->sk; | 4304 | sk = chan->sk; |
3824 | 4305 | ||
3825 | bh_lock_sock(sk); | 4306 | lock_sock(sk); |
3826 | 4307 | ||
3827 | BT_DBG("sk %p, len %d", sk, skb->len); | 4308 | BT_DBG("sk %p, len %d", sk, skb->len); |
3828 | 4309 | ||
@@ -3840,7 +4321,7 @@ drop: | |||
3840 | 4321 | ||
3841 | done: | 4322 | done: |
3842 | if (sk) | 4323 | if (sk) |
3843 | bh_unlock_sock(sk); | 4324 | release_sock(sk); |
3844 | return 0; | 4325 | return 0; |
3845 | } | 4326 | } |
3846 | 4327 | ||
@@ -3855,7 +4336,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct | |||
3855 | 4336 | ||
3856 | sk = chan->sk; | 4337 | sk = chan->sk; |
3857 | 4338 | ||
3858 | bh_lock_sock(sk); | 4339 | lock_sock(sk); |
3859 | 4340 | ||
3860 | BT_DBG("sk %p, len %d", sk, skb->len); | 4341 | BT_DBG("sk %p, len %d", sk, skb->len); |
3861 | 4342 | ||
@@ -3873,7 +4354,7 @@ drop: | |||
3873 | 4354 | ||
3874 | done: | 4355 | done: |
3875 | if (sk) | 4356 | if (sk) |
3876 | bh_unlock_sock(sk); | 4357 | release_sock(sk); |
3877 | return 0; | 4358 | return 0; |
3878 | } | 4359 | } |
3879 | 4360 | ||
@@ -3923,14 +4404,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) | |||
3923 | 4404 | ||
3924 | /* ---- L2CAP interface with lower layer (HCI) ---- */ | 4405 | /* ---- L2CAP interface with lower layer (HCI) ---- */ |
3925 | 4406 | ||
3926 | static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | 4407 | int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) |
3927 | { | 4408 | { |
3928 | int exact = 0, lm1 = 0, lm2 = 0; | 4409 | int exact = 0, lm1 = 0, lm2 = 0; |
3929 | struct l2cap_chan *c; | 4410 | struct l2cap_chan *c; |
3930 | 4411 | ||
3931 | if (type != ACL_LINK) | ||
3932 | return -EINVAL; | ||
3933 | |||
3934 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); | 4412 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); |
3935 | 4413 | ||
3936 | /* Find listening sockets and check their link_mode */ | 4414 | /* Find listening sockets and check their link_mode */ |
@@ -3943,12 +4421,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | |||
3943 | 4421 | ||
3944 | if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { | 4422 | if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { |
3945 | lm1 |= HCI_LM_ACCEPT; | 4423 | lm1 |= HCI_LM_ACCEPT; |
3946 | if (c->role_switch) | 4424 | if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) |
3947 | lm1 |= HCI_LM_MASTER; | 4425 | lm1 |= HCI_LM_MASTER; |
3948 | exact++; | 4426 | exact++; |
3949 | } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { | 4427 | } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { |
3950 | lm2 |= HCI_LM_ACCEPT; | 4428 | lm2 |= HCI_LM_ACCEPT; |
3951 | if (c->role_switch) | 4429 | if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) |
3952 | lm2 |= HCI_LM_MASTER; | 4430 | lm2 |= HCI_LM_MASTER; |
3953 | } | 4431 | } |
3954 | } | 4432 | } |
@@ -3957,15 +4435,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | |||
3957 | return exact ? lm1 : lm2; | 4435 | return exact ? lm1 : lm2; |
3958 | } | 4436 | } |
3959 | 4437 | ||
3960 | static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) | 4438 | int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) |
3961 | { | 4439 | { |
3962 | struct l2cap_conn *conn; | 4440 | struct l2cap_conn *conn; |
3963 | 4441 | ||
3964 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 4442 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); |
3965 | 4443 | ||
3966 | if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) | ||
3967 | return -EINVAL; | ||
3968 | |||
3969 | if (!status) { | 4444 | if (!status) { |
3970 | conn = l2cap_conn_add(hcon, status); | 4445 | conn = l2cap_conn_add(hcon, status); |
3971 | if (conn) | 4446 | if (conn) |
@@ -3976,27 +4451,22 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) | |||
3976 | return 0; | 4451 | return 0; |
3977 | } | 4452 | } |
3978 | 4453 | ||
3979 | static int l2cap_disconn_ind(struct hci_conn *hcon) | 4454 | int l2cap_disconn_ind(struct hci_conn *hcon) |
3980 | { | 4455 | { |
3981 | struct l2cap_conn *conn = hcon->l2cap_data; | 4456 | struct l2cap_conn *conn = hcon->l2cap_data; |
3982 | 4457 | ||
3983 | BT_DBG("hcon %p", hcon); | 4458 | BT_DBG("hcon %p", hcon); |
3984 | 4459 | ||
3985 | if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn) | 4460 | if (!conn) |
3986 | return 0x13; | 4461 | return HCI_ERROR_REMOTE_USER_TERM; |
3987 | |||
3988 | return conn->disc_reason; | 4462 | return conn->disc_reason; |
3989 | } | 4463 | } |
3990 | 4464 | ||
3991 | static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) | 4465 | int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) |
3992 | { | 4466 | { |
3993 | BT_DBG("hcon %p reason %d", hcon, reason); | 4467 | BT_DBG("hcon %p reason %d", hcon, reason); |
3994 | 4468 | ||
3995 | if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) | ||
3996 | return -EINVAL; | ||
3997 | |||
3998 | l2cap_conn_del(hcon, bt_to_errno(reason)); | 4469 | l2cap_conn_del(hcon, bt_to_errno(reason)); |
3999 | |||
4000 | return 0; | 4470 | return 0; |
4001 | } | 4471 | } |
4002 | 4472 | ||
@@ -4017,7 +4487,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) | |||
4017 | } | 4487 | } |
4018 | } | 4488 | } |
4019 | 4489 | ||
4020 | static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | 4490 | int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) |
4021 | { | 4491 | { |
4022 | struct l2cap_conn *conn = hcon->l2cap_data; | 4492 | struct l2cap_conn *conn = hcon->l2cap_data; |
4023 | struct l2cap_chan *chan; | 4493 | struct l2cap_chan *chan; |
@@ -4029,12 +4499,12 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
4029 | 4499 | ||
4030 | if (hcon->type == LE_LINK) { | 4500 | if (hcon->type == LE_LINK) { |
4031 | smp_distribute_keys(conn, 0); | 4501 | smp_distribute_keys(conn, 0); |
4032 | del_timer(&conn->security_timer); | 4502 | __cancel_delayed_work(&conn->security_timer); |
4033 | } | 4503 | } |
4034 | 4504 | ||
4035 | read_lock(&conn->chan_lock); | 4505 | rcu_read_lock(); |
4036 | 4506 | ||
4037 | list_for_each_entry(chan, &conn->chan_l, list) { | 4507 | list_for_each_entry_rcu(chan, &conn->chan_l, list) { |
4038 | struct sock *sk = chan->sk; | 4508 | struct sock *sk = chan->sk; |
4039 | 4509 | ||
4040 | bh_lock_sock(sk); | 4510 | bh_lock_sock(sk); |
@@ -4112,12 +4582,12 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
4112 | bh_unlock_sock(sk); | 4582 | bh_unlock_sock(sk); |
4113 | } | 4583 | } |
4114 | 4584 | ||
4115 | read_unlock(&conn->chan_lock); | 4585 | rcu_read_unlock(); |
4116 | 4586 | ||
4117 | return 0; | 4587 | return 0; |
4118 | } | 4588 | } |
4119 | 4589 | ||
4120 | static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | 4590 | int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) |
4121 | { | 4591 | { |
4122 | struct l2cap_conn *conn = hcon->l2cap_data; | 4592 | struct l2cap_conn *conn = hcon->l2cap_data; |
4123 | 4593 | ||
@@ -4178,11 +4648,11 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl | |||
4178 | BT_ERR("Frame exceeding recv MTU (len %d, " | 4648 | BT_ERR("Frame exceeding recv MTU (len %d, " |
4179 | "MTU %d)", len, | 4649 | "MTU %d)", len, |
4180 | chan->imtu); | 4650 | chan->imtu); |
4181 | bh_unlock_sock(sk); | 4651 | release_sock(sk); |
4182 | l2cap_conn_unreliable(conn, ECOMM); | 4652 | l2cap_conn_unreliable(conn, ECOMM); |
4183 | goto drop; | 4653 | goto drop; |
4184 | } | 4654 | } |
4185 | bh_unlock_sock(sk); | 4655 | release_sock(sk); |
4186 | } | 4656 | } |
4187 | 4657 | ||
4188 | /* Allocate skb for the complete frame (with header) */ | 4658 | /* Allocate skb for the complete frame (with header) */ |
@@ -4264,17 +4734,6 @@ static const struct file_operations l2cap_debugfs_fops = { | |||
4264 | 4734 | ||
4265 | static struct dentry *l2cap_debugfs; | 4735 | static struct dentry *l2cap_debugfs; |
4266 | 4736 | ||
4267 | static struct hci_proto l2cap_hci_proto = { | ||
4268 | .name = "L2CAP", | ||
4269 | .id = HCI_PROTO_L2CAP, | ||
4270 | .connect_ind = l2cap_connect_ind, | ||
4271 | .connect_cfm = l2cap_connect_cfm, | ||
4272 | .disconn_ind = l2cap_disconn_ind, | ||
4273 | .disconn_cfm = l2cap_disconn_cfm, | ||
4274 | .security_cfm = l2cap_security_cfm, | ||
4275 | .recv_acldata = l2cap_recv_acldata | ||
4276 | }; | ||
4277 | |||
4278 | int __init l2cap_init(void) | 4737 | int __init l2cap_init(void) |
4279 | { | 4738 | { |
4280 | int err; | 4739 | int err; |
@@ -4283,13 +4742,6 @@ int __init l2cap_init(void) | |||
4283 | if (err < 0) | 4742 | if (err < 0) |
4284 | return err; | 4743 | return err; |
4285 | 4744 | ||
4286 | err = hci_register_proto(&l2cap_hci_proto); | ||
4287 | if (err < 0) { | ||
4288 | BT_ERR("L2CAP protocol registration failed"); | ||
4289 | bt_sock_unregister(BTPROTO_L2CAP); | ||
4290 | goto error; | ||
4291 | } | ||
4292 | |||
4293 | if (bt_debugfs) { | 4745 | if (bt_debugfs) { |
4294 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, | 4746 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, |
4295 | bt_debugfs, NULL, &l2cap_debugfs_fops); | 4747 | bt_debugfs, NULL, &l2cap_debugfs_fops); |
@@ -4298,19 +4750,11 @@ int __init l2cap_init(void) | |||
4298 | } | 4750 | } |
4299 | 4751 | ||
4300 | return 0; | 4752 | return 0; |
4301 | |||
4302 | error: | ||
4303 | l2cap_cleanup_sockets(); | ||
4304 | return err; | ||
4305 | } | 4753 | } |
4306 | 4754 | ||
4307 | void l2cap_exit(void) | 4755 | void l2cap_exit(void) |
4308 | { | 4756 | { |
4309 | debugfs_remove(l2cap_debugfs); | 4757 | debugfs_remove(l2cap_debugfs); |
4310 | |||
4311 | if (hci_unregister_proto(&l2cap_hci_proto) < 0) | ||
4312 | BT_ERR("L2CAP protocol unregistration failed"); | ||
4313 | |||
4314 | l2cap_cleanup_sockets(); | 4758 | l2cap_cleanup_sockets(); |
4315 | } | 4759 | } |
4316 | 4760 | ||
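
With this series the l2cap_connect_ind(), l2cap_connect_cfm(), l2cap_disconn_ind(), l2cap_disconn_cfm(), l2cap_security_cfm() and l2cap_recv_acldata() entry points are exported and called directly by the HCI core, so the struct hci_proto table, its registration in l2cap_init() and its unregistration in l2cap_exit() are no longer needed, and the ACL/LE link-type filtering can be left to the caller. A toy user-space sketch of the two call styles; the names below are illustrative, not the kernel API:

    /* Sketch: ops-table indirection (old style) versus a direct call to
     * an exported function (new style). */
    #include <stdio.h>

    struct proto_ops {
        int (*connect_ind)(const char *bdaddr);
    };

    static int demo_connect_ind(const char *bdaddr)
    {
        printf("connect_ind from %s\n", bdaddr);
        return 0;
    }

    static const struct proto_ops demo_ops = {
        .connect_ind = demo_connect_ind,
    };

    int main(void)
    {
        /* Old style: dispatch through a registered table, with matching
         * register/unregister bookkeeping at init/exit time. */
        demo_ops.connect_ind("00:11:22:33:44:55");

        /* New style: the core simply calls the exported symbol. */
        demo_connect_ind("00:11:22:33:44:55");
        return 0;
    }
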
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 5c406d3136f7..9ca5616166f7 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -3,6 +3,7 @@ | |||
3 | Copyright (C) 2000-2001 Qualcomm Incorporated | 3 | Copyright (C) 2000-2001 Qualcomm Incorporated |
4 | Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> | 4 | Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> |
5 | Copyright (C) 2010 Google Inc. | 5 | Copyright (C) 2010 Google Inc. |
6 | Copyright (C) 2011 ProFUSION Embedded Systems | ||
6 | 7 | ||
7 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> | 8 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
8 | 9 | ||
@@ -122,70 +123,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al | |||
122 | if (la.l2_cid && la.l2_psm) | 123 | if (la.l2_cid && la.l2_psm) |
123 | return -EINVAL; | 124 | return -EINVAL; |
124 | 125 | ||
125 | lock_sock(sk); | 126 | err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); |
126 | |||
127 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED | ||
128 | && !(la.l2_psm || la.l2_cid)) { | ||
129 | err = -EINVAL; | ||
130 | goto done; | ||
131 | } | ||
132 | |||
133 | switch (chan->mode) { | ||
134 | case L2CAP_MODE_BASIC: | ||
135 | break; | ||
136 | case L2CAP_MODE_ERTM: | ||
137 | case L2CAP_MODE_STREAMING: | ||
138 | if (!disable_ertm) | ||
139 | break; | ||
140 | /* fall through */ | ||
141 | default: | ||
142 | err = -ENOTSUPP; | ||
143 | goto done; | ||
144 | } | ||
145 | |||
146 | switch (sk->sk_state) { | ||
147 | case BT_CONNECT: | ||
148 | case BT_CONNECT2: | ||
149 | case BT_CONFIG: | ||
150 | /* Already connecting */ | ||
151 | goto wait; | ||
152 | |||
153 | case BT_CONNECTED: | ||
154 | /* Already connected */ | ||
155 | err = -EISCONN; | ||
156 | goto done; | ||
157 | |||
158 | case BT_OPEN: | ||
159 | case BT_BOUND: | ||
160 | /* Can connect */ | ||
161 | break; | ||
162 | |||
163 | default: | ||
164 | err = -EBADFD; | ||
165 | goto done; | ||
166 | } | ||
167 | |||
168 | /* PSM must be odd and lsb of upper byte must be 0 */ | ||
169 | if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid && | ||
170 | chan->chan_type != L2CAP_CHAN_RAW) { | ||
171 | err = -EINVAL; | ||
172 | goto done; | ||
173 | } | ||
174 | |||
175 | /* Set destination address and psm */ | ||
176 | bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); | ||
177 | chan->psm = la.l2_psm; | ||
178 | chan->dcid = la.l2_cid; | ||
179 | |||
180 | err = l2cap_chan_connect(l2cap_pi(sk)->chan); | ||
181 | if (err) | 127 | if (err) |
182 | goto done; | 128 | goto done; |
183 | 129 | ||
184 | wait: | ||
185 | err = bt_sock_wait_state(sk, BT_CONNECTED, | 130 | err = bt_sock_wait_state(sk, BT_CONNECTED, |
186 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | 131 | sock_sndtimeo(sk, flags & O_NONBLOCK)); |
187 | done: | 132 | done: |
188 | release_sock(sk); | 133 | if (sock_owned_by_user(sk)) |
134 | release_sock(sk); | ||
189 | return err; | 135 | return err; |
190 | } | 136 | } |
191 | 137 | ||
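
l2cap_sock_connect() is reduced to a single l2cap_chan_connect() call that now takes the PSM, CID and destination address as arguments; the state-machine checks and the PSM sanity test move with it into the channel layer. For reference, the rule enforced by the removed check is that a connectable PSM is odd and has bit 0 of its upper octet clear, which a small user-space sketch makes explicit:

    /* Sketch: the "(psm & 0x0101) == 0x0001" PSM validity rule that the
     * socket layer used to check before connecting by PSM. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    static bool psm_valid(uint16_t psm)
    {
        return (psm & 0x0101) == 0x0001;
    }

    int main(void)
    {
        assert(psm_valid(0x0001));     /* SDP                        */
        assert(psm_valid(0x1001));     /* dynamic PSM range          */
        assert(!psm_valid(0x0002));    /* even lower octet: invalid  */
        assert(!psm_valid(0x0101));    /* upper octet bit 0 set: bad */
        return 0;
    }
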
@@ -334,7 +280,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us | |||
334 | opts.mode = chan->mode; | 280 | opts.mode = chan->mode; |
335 | opts.fcs = chan->fcs; | 281 | opts.fcs = chan->fcs; |
336 | opts.max_tx = chan->max_tx; | 282 | opts.max_tx = chan->max_tx; |
337 | opts.txwin_size = (__u16)chan->tx_win; | 283 | opts.txwin_size = chan->tx_win; |
338 | 284 | ||
339 | len = min_t(unsigned int, len, sizeof(opts)); | 285 | len = min_t(unsigned int, len, sizeof(opts)); |
340 | if (copy_to_user(optval, (char *) &opts, len)) | 286 | if (copy_to_user(optval, (char *) &opts, len)) |
@@ -359,10 +305,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us | |||
359 | break; | 305 | break; |
360 | } | 306 | } |
361 | 307 | ||
362 | if (chan->role_switch) | 308 | if (test_bit(FLAG_ROLE_SWITCH, &chan->flags)) |
363 | opt |= L2CAP_LM_MASTER; | 309 | opt |= L2CAP_LM_MASTER; |
364 | 310 | ||
365 | if (chan->force_reliable) | 311 | if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) |
366 | opt |= L2CAP_LM_RELIABLE; | 312 | opt |= L2CAP_LM_RELIABLE; |
367 | 313 | ||
368 | if (put_user(opt, (u32 __user *) optval)) | 314 | if (put_user(opt, (u32 __user *) optval)) |
@@ -449,7 +395,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
449 | break; | 395 | break; |
450 | 396 | ||
451 | case BT_FLUSHABLE: | 397 | case BT_FLUSHABLE: |
452 | if (put_user(chan->flushable, (u32 __user *) optval)) | 398 | if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), |
399 | (u32 __user *) optval)) | ||
453 | err = -EFAULT; | 400 | err = -EFAULT; |
454 | 401 | ||
455 | break; | 402 | break; |
@@ -461,7 +408,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
461 | break; | 408 | break; |
462 | } | 409 | } |
463 | 410 | ||
464 | pwr.force_active = chan->force_active; | 411 | pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); |
465 | 412 | ||
466 | len = min_t(unsigned int, len, sizeof(pwr)); | 413 | len = min_t(unsigned int, len, sizeof(pwr)); |
467 | if (copy_to_user(optval, (char *) &pwr, len)) | 414 | if (copy_to_user(optval, (char *) &pwr, len)) |
@@ -469,6 +416,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
469 | 416 | ||
470 | break; | 417 | break; |
471 | 418 | ||
419 | case BT_CHANNEL_POLICY: | ||
420 | if (!enable_hs) { | ||
421 | err = -ENOPROTOOPT; | ||
422 | break; | ||
423 | } | ||
424 | |||
425 | if (put_user(chan->chan_policy, (u32 __user *) optval)) | ||
426 | err = -EFAULT; | ||
427 | break; | ||
428 | |||
472 | default: | 429 | default: |
473 | err = -ENOPROTOOPT; | 430 | err = -ENOPROTOOPT; |
474 | break; | 431 | break; |
@@ -503,7 +460,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
503 | opts.mode = chan->mode; | 460 | opts.mode = chan->mode; |
504 | opts.fcs = chan->fcs; | 461 | opts.fcs = chan->fcs; |
505 | opts.max_tx = chan->max_tx; | 462 | opts.max_tx = chan->max_tx; |
506 | opts.txwin_size = (__u16)chan->tx_win; | 463 | opts.txwin_size = chan->tx_win; |
507 | 464 | ||
508 | len = min_t(unsigned int, sizeof(opts), optlen); | 465 | len = min_t(unsigned int, sizeof(opts), optlen); |
509 | if (copy_from_user((char *) &opts, optval, len)) { | 466 | if (copy_from_user((char *) &opts, optval, len)) { |
@@ -511,7 +468,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
511 | break; | 468 | break; |
512 | } | 469 | } |
513 | 470 | ||
514 | if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) { | 471 | if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { |
515 | err = -EINVAL; | 472 | err = -EINVAL; |
516 | break; | 473 | break; |
517 | } | 474 | } |
@@ -535,7 +492,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
535 | chan->omtu = opts.omtu; | 492 | chan->omtu = opts.omtu; |
536 | chan->fcs = opts.fcs; | 493 | chan->fcs = opts.fcs; |
537 | chan->max_tx = opts.max_tx; | 494 | chan->max_tx = opts.max_tx; |
538 | chan->tx_win = (__u8)opts.txwin_size; | 495 | chan->tx_win = opts.txwin_size; |
539 | break; | 496 | break; |
540 | 497 | ||
541 | case L2CAP_LM: | 498 | case L2CAP_LM: |
@@ -551,8 +508,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
551 | if (opt & L2CAP_LM_SECURE) | 508 | if (opt & L2CAP_LM_SECURE) |
552 | chan->sec_level = BT_SECURITY_HIGH; | 509 | chan->sec_level = BT_SECURITY_HIGH; |
553 | 510 | ||
554 | chan->role_switch = (opt & L2CAP_LM_MASTER); | 511 | if (opt & L2CAP_LM_MASTER) |
555 | chan->force_reliable = (opt & L2CAP_LM_RELIABLE); | 512 | set_bit(FLAG_ROLE_SWITCH, &chan->flags); |
513 | else | ||
514 | clear_bit(FLAG_ROLE_SWITCH, &chan->flags); | ||
515 | |||
516 | if (opt & L2CAP_LM_RELIABLE) | ||
517 | set_bit(FLAG_FORCE_RELIABLE, &chan->flags); | ||
518 | else | ||
519 | clear_bit(FLAG_FORCE_RELIABLE, &chan->flags); | ||
556 | break; | 520 | break; |
557 | 521 | ||
558 | default: | 522 | default: |
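
Throughout this file the per-channel booleans (role_switch, force_reliable and, further down, flushable and force_active) are folded into a single chan->flags word and toggled with set_bit(), clear_bit() and test_bit(), which also lets a child socket inherit all of them with one assignment. The sketch below models the pattern in plain C; the FLAG_* names are taken from the diff, their bit positions here are arbitrary:

    /* Sketch: booleans replaced by flag bits in one word. */
    #include <stdbool.h>
    #include <stdio.h>

    enum {
        FLAG_ROLE_SWITCH,
        FLAG_FORCE_RELIABLE,
        FLAG_FLUSHABLE,
        FLAG_FORCE_ACTIVE,
    };

    static void assign_flag(unsigned long *flags, int bit, bool on)
    {
        if (on)
            *flags |= 1UL << bit;
        else
            *flags &= ~(1UL << bit);
    }

    static bool test_flag(unsigned long flags, int bit)
    {
        return flags & (1UL << bit);
    }

    int main(void)
    {
        unsigned long flags = 0;

        /* mirrors the L2CAP_LM handling above */
        assign_flag(&flags, FLAG_ROLE_SWITCH, true);      /* L2CAP_LM_MASTER set   */
        assign_flag(&flags, FLAG_FORCE_RELIABLE, false);  /* L2CAP_LM_RELIABLE off */

        printf("role switch %d, reliable %d\n",
               test_flag(flags, FLAG_ROLE_SWITCH),
               test_flag(flags, FLAG_FORCE_RELIABLE));
        return 0;
    }
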
@@ -608,8 +572,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
608 | 572 | ||
609 | chan->sec_level = sec.level; | 573 | chan->sec_level = sec.level; |
610 | 574 | ||
575 | if (!chan->conn) | ||
576 | break; | ||
577 | |||
611 | conn = chan->conn; | 578 | conn = chan->conn; |
612 | if (conn && chan->scid == L2CAP_CID_LE_DATA) { | 579 | |
580 | /* change security for LE channels */ | ||
581 | if (chan->scid == L2CAP_CID_LE_DATA) { | ||
613 | if (!conn->hcon->out) { | 582 | if (!conn->hcon->out) { |
614 | err = -EINVAL; | 583 | err = -EINVAL; |
615 | break; | 584 | break; |
@@ -617,9 +586,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
617 | 586 | ||
618 | if (smp_conn_security(conn, sec.level)) | 587 | if (smp_conn_security(conn, sec.level)) |
619 | break; | 588 | break; |
620 | |||
621 | err = 0; | ||
622 | sk->sk_state = BT_CONFIG; | 589 | sk->sk_state = BT_CONFIG; |
590 | |||
591 | /* or for ACL link, under defer_setup time */ | ||
592 | } else if (sk->sk_state == BT_CONNECT2 && | ||
593 | bt_sk(sk)->defer_setup) { | ||
594 | err = l2cap_chan_check_security(chan); | ||
595 | } else { | ||
596 | err = -EINVAL; | ||
623 | } | 597 | } |
624 | break; | 598 | break; |
625 | 599 | ||
@@ -658,7 +632,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
658 | } | 632 | } |
659 | } | 633 | } |
660 | 634 | ||
661 | chan->flushable = opt; | 635 | if (opt) |
636 | set_bit(FLAG_FLUSHABLE, &chan->flags); | ||
637 | else | ||
638 | clear_bit(FLAG_FLUSHABLE, &chan->flags); | ||
662 | break; | 639 | break; |
663 | 640 | ||
664 | case BT_POWER: | 641 | case BT_POWER: |
@@ -675,7 +652,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
675 | err = -EFAULT; | 652 | err = -EFAULT; |
676 | break; | 653 | break; |
677 | } | 654 | } |
678 | chan->force_active = pwr.force_active; | 655 | |
656 | if (pwr.force_active) | ||
657 | set_bit(FLAG_FORCE_ACTIVE, &chan->flags); | ||
658 | else | ||
659 | clear_bit(FLAG_FORCE_ACTIVE, &chan->flags); | ||
660 | break; | ||
661 | |||
662 | case BT_CHANNEL_POLICY: | ||
663 | if (!enable_hs) { | ||
664 | err = -ENOPROTOOPT; | ||
665 | break; | ||
666 | } | ||
667 | |||
668 | if (get_user(opt, (u32 __user *) optval)) { | ||
669 | err = -EFAULT; | ||
670 | break; | ||
671 | } | ||
672 | |||
673 | if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { | ||
674 | err = -EINVAL; | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | if (chan->mode != L2CAP_MODE_ERTM && | ||
679 | chan->mode != L2CAP_MODE_STREAMING) { | ||
680 | err = -EOPNOTSUPP; | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | chan->chan_policy = (u8) opt; | ||
679 | break; | 685 | break; |
680 | 686 | ||
681 | default: | 687 | default: |
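
BT_CHANNEL_POLICY is a new option: it is only accepted when high-speed (AMP) support is enabled, the value may not exceed BT_CHANNEL_POLICY_AMP_PREFERRED, and the channel must be in ERTM or streaming mode before a policy can be attached. A user-space sketch of the same checks; the 0..2 ordering of the policy values is an assumption made for the example:

    /* Sketch: validation order used by the BT_CHANNEL_POLICY handler. */
    #include <errno.h>
    #include <assert.h>

    enum { MODE_BASIC, MODE_ERTM, MODE_STREAMING };

    enum {
        POLICY_BREDR_ONLY,
        POLICY_BREDR_PREFERRED,
        POLICY_AMP_PREFERRED,          /* highest accepted value */
    };

    static int check_channel_policy(int enable_hs, int mode, unsigned int opt)
    {
        if (!enable_hs)
            return -ENOPROTOOPT;       /* high speed not enabled      */
        if (opt > POLICY_AMP_PREFERRED)
            return -EINVAL;            /* unknown policy value        */
        if (mode != MODE_ERTM && mode != MODE_STREAMING)
            return -EOPNOTSUPP;        /* AMP needs ERTM or streaming */
        return 0;
    }

    int main(void)
    {
        assert(check_channel_policy(1, MODE_ERTM, POLICY_AMP_PREFERRED) == 0);
        assert(check_channel_policy(0, MODE_ERTM, POLICY_BREDR_ONLY) == -ENOPROTOOPT);
        assert(check_channel_policy(1, MODE_BASIC, POLICY_BREDR_ONLY) == -EOPNOTSUPP);
        return 0;
    }
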
@@ -709,7 +715,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
709 | return -ENOTCONN; | 715 | return -ENOTCONN; |
710 | } | 716 | } |
711 | 717 | ||
712 | err = l2cap_chan_send(chan, msg, len); | 718 | err = l2cap_chan_send(chan, msg, len, sk->sk_priority); |
713 | 719 | ||
714 | release_sock(sk); | 720 | release_sock(sk); |
715 | return err; | 721 | return err; |
@@ -931,11 +937,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) | |||
931 | chan->fcs = pchan->fcs; | 937 | chan->fcs = pchan->fcs; |
932 | chan->max_tx = pchan->max_tx; | 938 | chan->max_tx = pchan->max_tx; |
933 | chan->tx_win = pchan->tx_win; | 939 | chan->tx_win = pchan->tx_win; |
940 | chan->tx_win_max = pchan->tx_win_max; | ||
934 | chan->sec_level = pchan->sec_level; | 941 | chan->sec_level = pchan->sec_level; |
935 | chan->role_switch = pchan->role_switch; | 942 | chan->flags = pchan->flags; |
936 | chan->force_reliable = pchan->force_reliable; | ||
937 | chan->flushable = pchan->flushable; | ||
938 | chan->force_active = pchan->force_active; | ||
939 | 943 | ||
940 | security_sk_clone(parent, sk); | 944 | security_sk_clone(parent, sk); |
941 | } else { | 945 | } else { |
@@ -964,12 +968,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) | |||
964 | chan->max_tx = L2CAP_DEFAULT_MAX_TX; | 968 | chan->max_tx = L2CAP_DEFAULT_MAX_TX; |
965 | chan->fcs = L2CAP_FCS_CRC16; | 969 | chan->fcs = L2CAP_FCS_CRC16; |
966 | chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; | 970 | chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; |
971 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; | ||
967 | chan->sec_level = BT_SECURITY_LOW; | 972 | chan->sec_level = BT_SECURITY_LOW; |
968 | chan->role_switch = 0; | 973 | chan->flags = 0; |
969 | chan->force_reliable = 0; | 974 | set_bit(FLAG_FORCE_ACTIVE, &chan->flags); |
970 | chan->flushable = BT_FLUSHABLE_OFF; | ||
971 | chan->force_active = BT_POWER_FORCE_ACTIVE_ON; | ||
972 | |||
973 | } | 975 | } |
974 | 976 | ||
975 | /* Default config options */ | 977 | /* Default config options */ |
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 2c7634296866..2540944d871f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | /* Bluetooth HCI Management interface */ | 23 | /* Bluetooth HCI Management interface */ |
24 | 24 | ||
25 | #include <linux/kernel.h> | ||
25 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
@@ -29,26 +30,103 @@ | |||
29 | #include <net/bluetooth/bluetooth.h> | 30 | #include <net/bluetooth/bluetooth.h> |
30 | #include <net/bluetooth/hci_core.h> | 31 | #include <net/bluetooth/hci_core.h> |
31 | #include <net/bluetooth/mgmt.h> | 32 | #include <net/bluetooth/mgmt.h> |
33 | #include <net/bluetooth/smp.h> | ||
32 | 34 | ||
33 | #define MGMT_VERSION 0 | 35 | #define MGMT_VERSION 0 |
34 | #define MGMT_REVISION 1 | 36 | #define MGMT_REVISION 1 |
35 | 37 | ||
38 | #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ | ||
39 | |||
40 | #define SERVICE_CACHE_TIMEOUT (5 * 1000) | ||
41 | |||
36 | struct pending_cmd { | 42 | struct pending_cmd { |
37 | struct list_head list; | 43 | struct list_head list; |
38 | __u16 opcode; | 44 | u16 opcode; |
39 | int index; | 45 | int index; |
40 | void *param; | 46 | void *param; |
41 | struct sock *sk; | 47 | struct sock *sk; |
42 | void *user_data; | 48 | void *user_data; |
43 | }; | 49 | }; |
44 | 50 | ||
45 | static LIST_HEAD(cmd_list); | 51 | /* HCI to MGMT error code conversion table */ |
52 | static u8 mgmt_status_table[] = { | ||
53 | MGMT_STATUS_SUCCESS, | ||
54 | MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */ | ||
55 | MGMT_STATUS_NOT_CONNECTED, /* No Connection */ | ||
56 | MGMT_STATUS_FAILED, /* Hardware Failure */ | ||
57 | MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */ | ||
58 | MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */ | ||
59 | MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */ | ||
60 | MGMT_STATUS_NO_RESOURCES, /* Memory Full */ | ||
61 | MGMT_STATUS_TIMEOUT, /* Connection Timeout */ | ||
62 | MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */ | ||
63 | MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */ | ||
64 | MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */ | ||
65 | MGMT_STATUS_BUSY, /* Command Disallowed */ | ||
66 | MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */ | ||
67 | MGMT_STATUS_REJECTED, /* Rejected Security */ | ||
68 | MGMT_STATUS_REJECTED, /* Rejected Personal */ | ||
69 | MGMT_STATUS_TIMEOUT, /* Host Timeout */ | ||
70 | MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */ | ||
71 | MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */ | ||
72 | MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */ | ||
73 | MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */ | ||
74 | MGMT_STATUS_DISCONNECTED, /* OE Power Off */ | ||
75 | MGMT_STATUS_DISCONNECTED, /* Connection Terminated */ | ||
76 | MGMT_STATUS_BUSY, /* Repeated Attempts */ | ||
77 | MGMT_STATUS_REJECTED, /* Pairing Not Allowed */ | ||
78 | MGMT_STATUS_FAILED, /* Unknown LMP PDU */ | ||
79 | MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */ | ||
80 | MGMT_STATUS_REJECTED, /* SCO Offset Rejected */ | ||
81 | MGMT_STATUS_REJECTED, /* SCO Interval Rejected */ | ||
82 | MGMT_STATUS_REJECTED, /* Air Mode Rejected */ | ||
83 | MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */ | ||
84 | MGMT_STATUS_FAILED, /* Unspecified Error */ | ||
85 | MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */ | ||
86 | MGMT_STATUS_FAILED, /* Role Change Not Allowed */ | ||
87 | MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */ | ||
88 | MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */ | ||
89 | MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */ | ||
90 | MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */ | ||
91 | MGMT_STATUS_FAILED, /* Unit Link Key Used */ | ||
92 | MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */ | ||
93 | MGMT_STATUS_TIMEOUT, /* Instant Passed */ | ||
94 | MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ | ||
95 | MGMT_STATUS_FAILED, /* Transaction Collision */ | ||
96 | MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ | ||
97 | MGMT_STATUS_REJECTED, /* QoS Rejected */ | ||
98 | MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ | ||
99 | MGMT_STATUS_REJECTED, /* Insufficient Security */ | ||
100 | MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ | ||
101 | MGMT_STATUS_BUSY, /* Role Switch Pending */ | ||
102 | MGMT_STATUS_FAILED, /* Slot Violation */ | ||
103 | MGMT_STATUS_FAILED, /* Role Switch Failed */ | ||
104 | MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ | ||
105 | MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */ | ||
106 | MGMT_STATUS_BUSY, /* Host Busy Pairing */ | ||
107 | MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */ | ||
108 | MGMT_STATUS_BUSY, /* Controller Busy */ | ||
109 | MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */ | ||
110 | MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */ | ||
111 | MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */ | ||
112 | MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */ | ||
113 | MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ | ||
114 | }; | ||
115 | |||
116 | static u8 mgmt_status(u8 hci_status) | ||
117 | { | ||
118 | if (hci_status < ARRAY_SIZE(mgmt_status_table)) | ||
119 | return mgmt_status_table[hci_status]; | ||
120 | |||
121 | return MGMT_STATUS_FAILED; | ||
122 | } | ||
46 | 123 | ||
47 | static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) | 124 | static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) |
48 | { | 125 | { |
49 | struct sk_buff *skb; | 126 | struct sk_buff *skb; |
50 | struct mgmt_hdr *hdr; | 127 | struct mgmt_hdr *hdr; |
51 | struct mgmt_ev_cmd_status *ev; | 128 | struct mgmt_ev_cmd_status *ev; |
129 | int err; | ||
52 | 130 | ||
53 | BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); | 131 | BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); |
54 | 132 | ||
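
mgmt_status(), added above, translates raw HCI error codes into the much smaller MGMT_STATUS_* space through a lookup table, and any code beyond the end of the table collapses to MGMT_STATUS_FAILED, so controller-specific values never reach management clients unmapped. A trimmed user-space sketch of the table-plus-fallback pattern (only the first few mappings from the diff are reproduced):

    /* Sketch: bounded table lookup with a generic fallback. */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum { STATUS_SUCCESS, STATUS_UNKNOWN_COMMAND, STATUS_NOT_CONNECTED,
           STATUS_FAILED };

    static const unsigned char status_table[] = {
        STATUS_SUCCESS,                /* 0x00 Success          */
        STATUS_UNKNOWN_COMMAND,        /* 0x01 Unknown Command  */
        STATUS_NOT_CONNECTED,          /* 0x02 No Connection    */
        STATUS_FAILED,                 /* 0x03 Hardware Failure */
    };

    static unsigned char to_mgmt_status(unsigned char hci_status)
    {
        if (hci_status < ARRAY_SIZE(status_table))
            return status_table[hci_status];
        return STATUS_FAILED;          /* anything unknown */
    }

    int main(void)
    {
        printf("0x02 -> %d, 0xff -> %d\n",
               to_mgmt_status(0x02), to_mgmt_status(0xff));
        return 0;
    }
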
@@ -66,10 +144,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) | |||
66 | ev->status = status; | 144 | ev->status = status; |
67 | put_unaligned_le16(cmd, &ev->opcode); | 145 | put_unaligned_le16(cmd, &ev->opcode); |
68 | 146 | ||
69 | if (sock_queue_rcv_skb(sk, skb) < 0) | 147 | err = sock_queue_rcv_skb(sk, skb); |
148 | if (err < 0) | ||
70 | kfree_skb(skb); | 149 | kfree_skb(skb); |
71 | 150 | ||
72 | return 0; | 151 | return err; |
73 | } | 152 | } |
74 | 153 | ||
75 | static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, | 154 | static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, |
@@ -78,6 +157,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, | |||
78 | struct sk_buff *skb; | 157 | struct sk_buff *skb; |
79 | struct mgmt_hdr *hdr; | 158 | struct mgmt_hdr *hdr; |
80 | struct mgmt_ev_cmd_complete *ev; | 159 | struct mgmt_ev_cmd_complete *ev; |
160 | int err; | ||
81 | 161 | ||
82 | BT_DBG("sock %p", sk); | 162 | BT_DBG("sock %p", sk); |
83 | 163 | ||
@@ -97,10 +177,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, | |||
97 | if (rp) | 177 | if (rp) |
98 | memcpy(ev->data, rp, rp_len); | 178 | memcpy(ev->data, rp, rp_len); |
99 | 179 | ||
100 | if (sock_queue_rcv_skb(sk, skb) < 0) | 180 | err = sock_queue_rcv_skb(sk, skb); |
181 | if (err < 0) | ||
101 | kfree_skb(skb); | 182 | kfree_skb(skb); |
102 | 183 | ||
103 | return 0; | 184 | return err; |
104 | } | 185 | } |
105 | 186 | ||
106 | static int read_version(struct sock *sk) | 187 | static int read_version(struct sock *sk) |
@@ -120,6 +201,7 @@ static int read_index_list(struct sock *sk) | |||
120 | { | 201 | { |
121 | struct mgmt_rp_read_index_list *rp; | 202 | struct mgmt_rp_read_index_list *rp; |
122 | struct list_head *p; | 203 | struct list_head *p; |
204 | struct hci_dev *d; | ||
123 | size_t rp_len; | 205 | size_t rp_len; |
124 | u16 count; | 206 | u16 count; |
125 | int i, err; | 207 | int i, err; |
@@ -143,10 +225,9 @@ static int read_index_list(struct sock *sk) | |||
143 | put_unaligned_le16(count, &rp->num_controllers); | 225 | put_unaligned_le16(count, &rp->num_controllers); |
144 | 226 | ||
145 | i = 0; | 227 | i = 0; |
146 | list_for_each(p, &hci_dev_list) { | 228 | list_for_each_entry(d, &hci_dev_list, list) { |
147 | struct hci_dev *d = list_entry(p, struct hci_dev, list); | 229 | if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags)) |
148 | 230 | cancel_delayed_work(&d->power_off); | |
149 | hci_del_off_timer(d); | ||
150 | 231 | ||
151 | if (test_bit(HCI_SETUP, &d->flags)) | 232 | if (test_bit(HCI_SETUP, &d->flags)) |
152 | continue; | 233 | continue; |
@@ -165,6 +246,262 @@ static int read_index_list(struct sock *sk) | |||
165 | return err; | 246 | return err; |
166 | } | 247 | } |
167 | 248 | ||
249 | static u32 get_supported_settings(struct hci_dev *hdev) | ||
250 | { | ||
251 | u32 settings = 0; | ||
252 | |||
253 | settings |= MGMT_SETTING_POWERED; | ||
254 | settings |= MGMT_SETTING_CONNECTABLE; | ||
255 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | ||
256 | settings |= MGMT_SETTING_DISCOVERABLE; | ||
257 | settings |= MGMT_SETTING_PAIRABLE; | ||
258 | |||
259 | if (hdev->features[6] & LMP_SIMPLE_PAIR) | ||
260 | settings |= MGMT_SETTING_SSP; | ||
261 | |||
262 | if (!(hdev->features[4] & LMP_NO_BREDR)) { | ||
263 | settings |= MGMT_SETTING_BREDR; | ||
264 | settings |= MGMT_SETTING_LINK_SECURITY; | ||
265 | } | ||
266 | |||
267 | if (hdev->features[4] & LMP_LE) | ||
268 | settings |= MGMT_SETTING_LE; | ||
269 | |||
270 | return settings; | ||
271 | } | ||
272 | |||
273 | static u32 get_current_settings(struct hci_dev *hdev) | ||
274 | { | ||
275 | u32 settings = 0; | ||
276 | |||
277 | if (test_bit(HCI_UP, &hdev->flags)) | ||
278 | settings |= MGMT_SETTING_POWERED; | ||
279 | else | ||
280 | return settings; | ||
281 | |||
282 | if (test_bit(HCI_PSCAN, &hdev->flags)) | ||
283 | settings |= MGMT_SETTING_CONNECTABLE; | ||
284 | |||
285 | if (test_bit(HCI_ISCAN, &hdev->flags)) | ||
286 | settings |= MGMT_SETTING_DISCOVERABLE; | ||
287 | |||
288 | if (test_bit(HCI_PAIRABLE, &hdev->flags)) | ||
289 | settings |= MGMT_SETTING_PAIRABLE; | ||
290 | |||
291 | if (!(hdev->features[4] & LMP_NO_BREDR)) | ||
292 | settings |= MGMT_SETTING_BREDR; | ||
293 | |||
294 | if (hdev->extfeatures[0] & LMP_HOST_LE) | ||
295 | settings |= MGMT_SETTING_LE; | ||
296 | |||
297 | if (test_bit(HCI_AUTH, &hdev->flags)) | ||
298 | settings |= MGMT_SETTING_LINK_SECURITY; | ||
299 | |||
300 | if (hdev->ssp_mode > 0) | ||
301 | settings |= MGMT_SETTING_SSP; | ||
302 | |||
303 | return settings; | ||
304 | } | ||
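
get_supported_settings() derives its bitmask from the controller's feature bits, while get_current_settings() above derives one from live state (HCI_UP, page and inquiry scan, pairable, LE host support, authentication, SSP); both words travel as little-endian 32-bit fields in the Read Controller Info reply. A user-space sketch of the "current" half follows; the SETTING_* bit values are placeholders, not the real MGMT_SETTING_* constants:

    /* Sketch: packing live adapter state into a settings bitmask. */
    #include <stdint.h>
    #include <stdio.h>

    #define SETTING_POWERED      (1u << 0)
    #define SETTING_CONNECTABLE  (1u << 1)
    #define SETTING_DISCOVERABLE (1u << 2)
    #define SETTING_LE           (1u << 3)

    struct dev_state {
        int up, pscan, iscan, le_host;
    };

    static uint32_t current_settings(const struct dev_state *d)
    {
        uint32_t s = 0;

        if (!d->up)
            return 0;                  /* powered off: report nothing else */
        s |= SETTING_POWERED;
        if (d->pscan)
            s |= SETTING_CONNECTABLE;
        if (d->iscan)
            s |= SETTING_DISCOVERABLE;
        if (d->le_host)
            s |= SETTING_LE;
        return s;
    }

    int main(void)
    {
        struct dev_state d = { .up = 1, .pscan = 1, .iscan = 0, .le_host = 1 };

        printf("current settings: 0x%08x\n", (unsigned) current_settings(&d));
        return 0;
    }
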
305 | |||
306 | #define EIR_FLAGS 0x01 /* flags */ | ||
307 | #define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ | ||
308 | #define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ | ||
309 | #define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ | ||
310 | #define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ | ||
311 | #define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ | ||
312 | #define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ | ||
313 | #define EIR_NAME_SHORT 0x08 /* shortened local name */ | ||
314 | #define EIR_NAME_COMPLETE 0x09 /* complete local name */ | ||
315 | #define EIR_TX_POWER 0x0A /* transmit power level */ | ||
316 | #define EIR_DEVICE_ID 0x10 /* device ID */ | ||
317 | |||
318 | #define PNP_INFO_SVCLASS_ID 0x1200 | ||
319 | |||
320 | static u8 bluetooth_base_uuid[] = { | ||
321 | 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, | ||
322 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
323 | }; | ||
324 | |||
325 | static u16 get_uuid16(u8 *uuid128) | ||
326 | { | ||
327 | u32 val; | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < 12; i++) { | ||
331 | if (bluetooth_base_uuid[i] != uuid128[i]) | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | memcpy(&val, &uuid128[12], 4); | ||
336 | |||
337 | val = le32_to_cpu(val); | ||
338 | if (val > 0xffff) | ||
339 | return 0; | ||
340 | |||
341 | return (u16) val; | ||
342 | } | ||
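
get_uuid16() above recognises 128-bit UUIDs that are really 16-bit assigned numbers wrapped in the Bluetooth base UUID: the first twelve bytes must match the little-endian base UUID array and the 32-bit value stored at bytes 12..15 must fit in 16 bits. The inverse direction, sketched below for user space, makes that byte layout explicit:

    /* Sketch: expand a 16-bit assigned number into the 128-bit base-UUID
     * form, using the same little-endian layout as bluetooth_base_uuid. */
    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    static const uint8_t base_uuid[16] = {
        0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
        0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    static void uuid16_to_uuid128(uint16_t u16, uint8_t uuid128[16])
    {
        memcpy(uuid128, base_uuid, 16);
        uuid128[12] = u16 & 0xff;      /* value, little-endian, bytes 12..15 */
        uuid128[13] = u16 >> 8;
    }

    int main(void)
    {
        uint8_t uuid[16];

        uuid16_to_uuid128(0x1200, uuid);          /* PnP Information  */
        assert(uuid[12] == 0x00 && uuid[13] == 0x12);
        assert(memcmp(uuid, base_uuid, 12) == 0); /* base part intact */
        return 0;
    }
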
343 | |||
344 | static void create_eir(struct hci_dev *hdev, u8 *data) | ||
345 | { | ||
346 | u8 *ptr = data; | ||
347 | u16 eir_len = 0; | ||
348 | u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; | ||
349 | int i, truncated = 0; | ||
350 | struct bt_uuid *uuid; | ||
351 | size_t name_len; | ||
352 | |||
353 | name_len = strlen(hdev->dev_name); | ||
354 | |||
355 | if (name_len > 0) { | ||
356 | /* EIR Data type */ | ||
357 | if (name_len > 48) { | ||
358 | name_len = 48; | ||
359 | ptr[1] = EIR_NAME_SHORT; | ||
360 | } else | ||
361 | ptr[1] = EIR_NAME_COMPLETE; | ||
362 | |||
363 | /* EIR Data length */ | ||
364 | ptr[0] = name_len + 1; | ||
365 | |||
366 | memcpy(ptr + 2, hdev->dev_name, name_len); | ||
367 | |||
368 | eir_len += (name_len + 2); | ||
369 | ptr += (name_len + 2); | ||
370 | } | ||
371 | |||
372 | memset(uuid16_list, 0, sizeof(uuid16_list)); | ||
373 | |||
374 | /* Group all UUID16 types */ | ||
375 | list_for_each_entry(uuid, &hdev->uuids, list) { | ||
376 | u16 uuid16; | ||
377 | |||
378 | uuid16 = get_uuid16(uuid->uuid); | ||
379 | if (uuid16 == 0) | ||
380 | return; | ||
381 | |||
382 | if (uuid16 < 0x1100) | ||
383 | continue; | ||
384 | |||
385 | if (uuid16 == PNP_INFO_SVCLASS_ID) | ||
386 | continue; | ||
387 | |||
388 | /* Stop if not enough space to put next UUID */ | ||
389 | if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) { | ||
390 | truncated = 1; | ||
391 | break; | ||
392 | } | ||
393 | |||
394 | /* Check for duplicates */ | ||
395 | for (i = 0; uuid16_list[i] != 0; i++) | ||
396 | if (uuid16_list[i] == uuid16) | ||
397 | break; | ||
398 | |||
399 | if (uuid16_list[i] == 0) { | ||
400 | uuid16_list[i] = uuid16; | ||
401 | eir_len += sizeof(u16); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | if (uuid16_list[0] != 0) { | ||
406 | u8 *length = ptr; | ||
407 | |||
408 | /* EIR Data type */ | ||
409 | ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL; | ||
410 | |||
411 | ptr += 2; | ||
412 | eir_len += 2; | ||
413 | |||
414 | for (i = 0; uuid16_list[i] != 0; i++) { | ||
415 | *ptr++ = (uuid16_list[i] & 0x00ff); | ||
416 | *ptr++ = (uuid16_list[i] & 0xff00) >> 8; | ||
417 | } | ||
418 | |||
419 | /* EIR Data length */ | ||
420 | *length = (i * sizeof(u16)) + 1; | ||
421 | } | ||
422 | } | ||
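
create_eir() emits standard EIR elements: a length octet, a type octet and the payload, where the length counts the type plus the payload but not itself. That is why the name handling above stores name_len + 1 in the length octet yet advances the write pointer by name_len + 2. A user-space sketch of the name element:

    /* Sketch: one <length><type><data> EIR element for the local name. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define EIR_NAME_COMPLETE 0x09

    static size_t eir_put_name(uint8_t *ptr, const char *name)
    {
        size_t name_len = strlen(name);

        ptr[0] = (uint8_t)(name_len + 1);   /* length: type + data    */
        ptr[1] = EIR_NAME_COMPLETE;         /* data type              */
        memcpy(ptr + 2, name, name_len);

        return name_len + 2;                /* bytes consumed overall */
    }

    int main(void)
    {
        uint8_t eir[240] = { 0 };           /* HCI EIR block is 240 bytes */
        size_t used = eir_put_name(eir, "hci0");

        printf("element uses %zu bytes, length octet = %d\n",
               used, (int) eir[0]);
        return 0;
    }
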
423 | |||
424 | static int update_eir(struct hci_dev *hdev) | ||
425 | { | ||
426 | struct hci_cp_write_eir cp; | ||
427 | |||
428 | if (!(hdev->features[6] & LMP_EXT_INQ)) | ||
429 | return 0; | ||
430 | |||
431 | if (hdev->ssp_mode == 0) | ||
432 | return 0; | ||
433 | |||
434 | if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
435 | return 0; | ||
436 | |||
437 | memset(&cp, 0, sizeof(cp)); | ||
438 | |||
439 | create_eir(hdev, cp.data); | ||
440 | |||
441 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) | ||
442 | return 0; | ||
443 | |||
444 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); | ||
445 | |||
446 | return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | ||
447 | } | ||
448 | |||
449 | static u8 get_service_classes(struct hci_dev *hdev) | ||
450 | { | ||
451 | struct bt_uuid *uuid; | ||
452 | u8 val = 0; | ||
453 | |||
454 | list_for_each_entry(uuid, &hdev->uuids, list) | ||
455 | val |= uuid->svc_hint; | ||
456 | |||
457 | return val; | ||
458 | } | ||
459 | |||
460 | static int update_class(struct hci_dev *hdev) | ||
461 | { | ||
462 | u8 cod[3]; | ||
463 | |||
464 | BT_DBG("%s", hdev->name); | ||
465 | |||
466 | if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
467 | return 0; | ||
468 | |||
469 | cod[0] = hdev->minor_class; | ||
470 | cod[1] = hdev->major_class; | ||
471 | cod[2] = get_service_classes(hdev); | ||
472 | |||
473 | if (memcmp(cod, hdev->dev_class, 3) == 0) | ||
474 | return 0; | ||
475 | |||
476 | return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); | ||
477 | } | ||
478 | |||
479 | static void service_cache_off(struct work_struct *work) | ||
480 | { | ||
481 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
482 | service_cache.work); | ||
483 | |||
484 | if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
485 | return; | ||
486 | |||
487 | hci_dev_lock(hdev); | ||
488 | |||
489 | update_eir(hdev); | ||
490 | update_class(hdev); | ||
491 | |||
492 | hci_dev_unlock(hdev); | ||
493 | } | ||
494 | |||
495 | static void mgmt_init_hdev(struct hci_dev *hdev) | ||
496 | { | ||
497 | if (!test_and_set_bit(HCI_MGMT, &hdev->flags)) | ||
498 | INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); | ||
499 | |||
500 | if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
501 | schedule_delayed_work(&hdev->service_cache, | ||
502 | msecs_to_jiffies(SERVICE_CACHE_TIMEOUT)); | ||
503 | } | ||
504 | |||
168 | static int read_controller_info(struct sock *sk, u16 index) | 505 | static int read_controller_info(struct sock *sk, u16 index) |
169 | { | 506 | { |
170 | struct mgmt_rp_read_info rp; | 507 | struct mgmt_rp_read_info rp; |
@@ -174,40 +511,33 @@ static int read_controller_info(struct sock *sk, u16 index) | |||
174 | 511 | ||
175 | hdev = hci_dev_get(index); | 512 | hdev = hci_dev_get(index); |
176 | if (!hdev) | 513 | if (!hdev) |
177 | return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); | 514 | return cmd_status(sk, index, MGMT_OP_READ_INFO, |
515 | MGMT_STATUS_INVALID_PARAMS); | ||
178 | 516 | ||
179 | hci_del_off_timer(hdev); | 517 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) |
518 | cancel_delayed_work_sync(&hdev->power_off); | ||
180 | 519 | ||
181 | hci_dev_lock_bh(hdev); | 520 | hci_dev_lock(hdev); |
182 | 521 | ||
183 | set_bit(HCI_MGMT, &hdev->flags); | 522 | if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags)) |
523 | mgmt_init_hdev(hdev); | ||
184 | 524 | ||
185 | memset(&rp, 0, sizeof(rp)); | 525 | memset(&rp, 0, sizeof(rp)); |
186 | 526 | ||
187 | rp.type = hdev->dev_type; | 527 | bacpy(&rp.bdaddr, &hdev->bdaddr); |
188 | 528 | ||
189 | rp.powered = test_bit(HCI_UP, &hdev->flags); | 529 | rp.version = hdev->hci_ver; |
190 | rp.connectable = test_bit(HCI_PSCAN, &hdev->flags); | ||
191 | rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags); | ||
192 | rp.pairable = test_bit(HCI_PSCAN, &hdev->flags); | ||
193 | 530 | ||
194 | if (test_bit(HCI_AUTH, &hdev->flags)) | 531 | put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); |
195 | rp.sec_mode = 3; | 532 | |
196 | else if (hdev->ssp_mode > 0) | 533 | rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); |
197 | rp.sec_mode = 4; | 534 | rp.current_settings = cpu_to_le32(get_current_settings(hdev)); |
198 | else | ||
199 | rp.sec_mode = 2; | ||
200 | 535 | ||
201 | bacpy(&rp.bdaddr, &hdev->bdaddr); | ||
202 | memcpy(rp.features, hdev->features, 8); | ||
203 | memcpy(rp.dev_class, hdev->dev_class, 3); | 536 | memcpy(rp.dev_class, hdev->dev_class, 3); |
204 | put_unaligned_le16(hdev->manufacturer, &rp.manufacturer); | ||
205 | rp.hci_ver = hdev->hci_ver; | ||
206 | put_unaligned_le16(hdev->hci_rev, &rp.hci_rev); | ||
207 | 537 | ||
208 | memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); | 538 | memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); |
209 | 539 | ||
210 | hci_dev_unlock_bh(hdev); | 540 | hci_dev_unlock(hdev); |
211 | hci_dev_put(hdev); | 541 | hci_dev_put(hdev); |
212 | 542 | ||
213 | return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); | 543 | return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); |
@@ -221,7 +551,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd) | |||
221 | } | 551 | } |
222 | 552 | ||
223 | static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, | 553 | static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, |
224 | u16 index, void *data, u16 len) | 554 | struct hci_dev *hdev, |
555 | void *data, u16 len) | ||
225 | { | 556 | { |
226 | struct pending_cmd *cmd; | 557 | struct pending_cmd *cmd; |
227 | 558 | ||
@@ -230,7 +561,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, | |||
230 | return NULL; | 561 | return NULL; |
231 | 562 | ||
232 | cmd->opcode = opcode; | 563 | cmd->opcode = opcode; |
233 | cmd->index = index; | 564 | cmd->index = hdev->id; |
234 | 565 | ||
235 | cmd->param = kmalloc(len, GFP_ATOMIC); | 566 | cmd->param = kmalloc(len, GFP_ATOMIC); |
236 | if (!cmd->param) { | 567 | if (!cmd->param) { |
@@ -244,48 +575,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, | |||
244 | cmd->sk = sk; | 575 | cmd->sk = sk; |
245 | sock_hold(sk); | 576 | sock_hold(sk); |
246 | 577 | ||
247 | list_add(&cmd->list, &cmd_list); | 578 | list_add(&cmd->list, &hdev->mgmt_pending); |
248 | 579 | ||
249 | return cmd; | 580 | return cmd; |
250 | } | 581 | } |
251 | 582 | ||
252 | static void mgmt_pending_foreach(u16 opcode, int index, | 583 | static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, |
253 | void (*cb)(struct pending_cmd *cmd, void *data), | 584 | void (*cb)(struct pending_cmd *cmd, void *data), |
254 | void *data) | 585 | void *data) |
255 | { | 586 | { |
256 | struct list_head *p, *n; | 587 | struct list_head *p, *n; |
257 | 588 | ||
258 | list_for_each_safe(p, n, &cmd_list) { | 589 | list_for_each_safe(p, n, &hdev->mgmt_pending) { |
259 | struct pending_cmd *cmd; | 590 | struct pending_cmd *cmd; |
260 | 591 | ||
261 | cmd = list_entry(p, struct pending_cmd, list); | 592 | cmd = list_entry(p, struct pending_cmd, list); |
262 | 593 | ||
263 | if (cmd->opcode != opcode) | 594 | if (opcode > 0 && cmd->opcode != opcode) |
264 | continue; | ||
265 | |||
266 | if (index >= 0 && cmd->index != index) | ||
267 | continue; | 595 | continue; |
268 | 596 | ||
269 | cb(cmd, data); | 597 | cb(cmd, data); |
270 | } | 598 | } |
271 | } | 599 | } |
272 | 600 | ||
273 | static struct pending_cmd *mgmt_pending_find(u16 opcode, int index) | 601 | static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) |
274 | { | 602 | { |
275 | struct list_head *p; | 603 | struct pending_cmd *cmd; |
276 | |||
277 | list_for_each(p, &cmd_list) { | ||
278 | struct pending_cmd *cmd; | ||
279 | |||
280 | cmd = list_entry(p, struct pending_cmd, list); | ||
281 | |||
282 | if (cmd->opcode != opcode) | ||
283 | continue; | ||
284 | |||
285 | if (index >= 0 && cmd->index != index) | ||
286 | continue; | ||
287 | 604 | ||
288 | return cmd; | 605 | list_for_each_entry(cmd, &hdev->mgmt_pending, list) { |
606 | if (cmd->opcode == opcode) | ||
607 | return cmd; | ||
289 | } | 608 | } |
290 | 609 | ||
291 | return NULL; | 610 | return NULL; |
@@ -297,6 +616,13 @@ static void mgmt_pending_remove(struct pending_cmd *cmd) | |||
297 | mgmt_pending_free(cmd); | 616 | mgmt_pending_free(cmd); |
298 | } | 617 | } |
299 | 618 | ||
619 | static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) | ||
620 | { | ||
621 | __le32 settings = cpu_to_le32(get_current_settings(hdev)); | ||
622 | |||
623 | return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings)); | ||
624 | } | ||
625 | |||
300 | static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) | 626 | static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) |
301 | { | 627 | { |
302 | struct mgmt_mode *cp; | 628 | struct mgmt_mode *cp; |
@@ -309,40 +635,43 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
309 | BT_DBG("request for hci%u", index); | 635 | BT_DBG("request for hci%u", index); |
310 | 636 | ||
311 | if (len != sizeof(*cp)) | 637 | if (len != sizeof(*cp)) |
312 | return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL); | 638 | return cmd_status(sk, index, MGMT_OP_SET_POWERED, |
639 | MGMT_STATUS_INVALID_PARAMS); | ||
313 | 640 | ||
314 | hdev = hci_dev_get(index); | 641 | hdev = hci_dev_get(index); |
315 | if (!hdev) | 642 | if (!hdev) |
316 | return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); | 643 | return cmd_status(sk, index, MGMT_OP_SET_POWERED, |
644 | MGMT_STATUS_INVALID_PARAMS); | ||
317 | 645 | ||
318 | hci_dev_lock_bh(hdev); | 646 | hci_dev_lock(hdev); |
319 | 647 | ||
320 | up = test_bit(HCI_UP, &hdev->flags); | 648 | up = test_bit(HCI_UP, &hdev->flags); |
321 | if ((cp->val && up) || (!cp->val && !up)) { | 649 | if ((cp->val && up) || (!cp->val && !up)) { |
322 | err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY); | 650 | err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); |
323 | goto failed; | 651 | goto failed; |
324 | } | 652 | } |
325 | 653 | ||
326 | if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { | 654 | if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { |
327 | err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); | 655 | err = cmd_status(sk, index, MGMT_OP_SET_POWERED, |
656 | MGMT_STATUS_BUSY); | ||
328 | goto failed; | 657 | goto failed; |
329 | } | 658 | } |
330 | 659 | ||
331 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); | 660 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); |
332 | if (!cmd) { | 661 | if (!cmd) { |
333 | err = -ENOMEM; | 662 | err = -ENOMEM; |
334 | goto failed; | 663 | goto failed; |
335 | } | 664 | } |
336 | 665 | ||
337 | if (cp->val) | 666 | if (cp->val) |
338 | queue_work(hdev->workqueue, &hdev->power_on); | 667 | schedule_work(&hdev->power_on); |
339 | else | 668 | else |
340 | queue_work(hdev->workqueue, &hdev->power_off); | 669 | schedule_work(&hdev->power_off.work); |
341 | 670 | ||
342 | err = 0; | 671 | err = 0; |
343 | 672 | ||
344 | failed: | 673 | failed: |
345 | hci_dev_unlock_bh(hdev); | 674 | hci_dev_unlock(hdev); |
346 | hci_dev_put(hdev); | 675 | hci_dev_put(hdev); |
347 | return err; | 676 | return err; |
348 | } | 677 | } |
@@ -350,7 +679,7 @@ failed: | |||
350 | static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, | 679 | static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, |
351 | u16 len) | 680 | u16 len) |
352 | { | 681 | { |
353 | struct mgmt_mode *cp; | 682 | struct mgmt_cp_set_discoverable *cp; |
354 | struct hci_dev *hdev; | 683 | struct hci_dev *hdev; |
355 | struct pending_cmd *cmd; | 684 | struct pending_cmd *cmd; |
356 | u8 scan; | 685 | u8 scan; |
@@ -361,32 +690,36 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, | |||
361 | BT_DBG("request for hci%u", index); | 690 | BT_DBG("request for hci%u", index); |
362 | 691 | ||
363 | if (len != sizeof(*cp)) | 692 | if (len != sizeof(*cp)) |
364 | return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL); | 693 | return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, |
694 | MGMT_STATUS_INVALID_PARAMS); | ||
365 | 695 | ||
366 | hdev = hci_dev_get(index); | 696 | hdev = hci_dev_get(index); |
367 | if (!hdev) | 697 | if (!hdev) |
368 | return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); | 698 | return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, |
699 | MGMT_STATUS_INVALID_PARAMS); | ||
369 | 700 | ||
370 | hci_dev_lock_bh(hdev); | 701 | hci_dev_lock(hdev); |
371 | 702 | ||
372 | if (!test_bit(HCI_UP, &hdev->flags)) { | 703 | if (!test_bit(HCI_UP, &hdev->flags)) { |
373 | err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); | 704 | err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, |
705 | MGMT_STATUS_NOT_POWERED); | ||
374 | goto failed; | 706 | goto failed; |
375 | } | 707 | } |
376 | 708 | ||
377 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || | 709 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || |
378 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { | 710 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { |
379 | err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); | 711 | err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, |
712 | MGMT_STATUS_BUSY); | ||
380 | goto failed; | 713 | goto failed; |
381 | } | 714 | } |
382 | 715 | ||
383 | if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && | 716 | if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && |
384 | test_bit(HCI_PSCAN, &hdev->flags)) { | 717 | test_bit(HCI_PSCAN, &hdev->flags)) { |
385 | err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY); | 718 | err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); |
386 | goto failed; | 719 | goto failed; |
387 | } | 720 | } |
388 | 721 | ||
389 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); | 722 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); |
390 | if (!cmd) { | 723 | if (!cmd) { |
391 | err = -ENOMEM; | 724 | err = -ENOMEM; |
392 | goto failed; | 725 | goto failed; |
@@ -396,13 +729,18 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, | |||
396 | 729 | ||
397 | if (cp->val) | 730 | if (cp->val) |
398 | scan |= SCAN_INQUIRY; | 731 | scan |= SCAN_INQUIRY; |
732 | else | ||
733 | cancel_delayed_work(&hdev->discov_off); | ||
399 | 734 | ||
400 | err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 735 | err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
401 | if (err < 0) | 736 | if (err < 0) |
402 | mgmt_pending_remove(cmd); | 737 | mgmt_pending_remove(cmd); |
403 | 738 | ||
739 | if (cp->val) | ||
740 | hdev->discov_timeout = get_unaligned_le16(&cp->timeout); | ||
741 | |||
404 | failed: | 742 | failed: |
405 | hci_dev_unlock_bh(hdev); | 743 | hci_dev_unlock(hdev); |
406 | hci_dev_put(hdev); | 744 | hci_dev_put(hdev); |
407 | 745 | ||
408 | return err; | 746 | return err; |
@@ -422,31 +760,35 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, | |||
422 | BT_DBG("request for hci%u", index); | 760 | BT_DBG("request for hci%u", index); |
423 | 761 | ||
424 | if (len != sizeof(*cp)) | 762 | if (len != sizeof(*cp)) |
425 | return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL); | 763 | return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, |
764 | MGMT_STATUS_INVALID_PARAMS); | ||
426 | 765 | ||
427 | hdev = hci_dev_get(index); | 766 | hdev = hci_dev_get(index); |
428 | if (!hdev) | 767 | if (!hdev) |
429 | return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); | 768 | return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, |
769 | MGMT_STATUS_INVALID_PARAMS); | ||
430 | 770 | ||
431 | hci_dev_lock_bh(hdev); | 771 | hci_dev_lock(hdev); |
432 | 772 | ||
433 | if (!test_bit(HCI_UP, &hdev->flags)) { | 773 | if (!test_bit(HCI_UP, &hdev->flags)) { |
434 | err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); | 774 | err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, |
775 | MGMT_STATUS_NOT_POWERED); | ||
435 | goto failed; | 776 | goto failed; |
436 | } | 777 | } |
437 | 778 | ||
438 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || | 779 | if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || |
439 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { | 780 | mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { |
440 | err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); | 781 | err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, |
782 | MGMT_STATUS_BUSY); | ||
441 | goto failed; | 783 | goto failed; |
442 | } | 784 | } |
443 | 785 | ||
444 | if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { | 786 | if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { |
445 | err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY); | 787 | err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); |
446 | goto failed; | 788 | goto failed; |
447 | } | 789 | } |
448 | 790 | ||
449 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); | 791 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); |
450 | if (!cmd) { | 792 | if (!cmd) { |
451 | err = -ENOMEM; | 793 | err = -ENOMEM; |
452 | goto failed; | 794 | goto failed; |
@@ -462,14 +804,14 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, | |||
462 | mgmt_pending_remove(cmd); | 804 | mgmt_pending_remove(cmd); |
463 | 805 | ||
464 | failed: | 806 | failed: |
465 | hci_dev_unlock_bh(hdev); | 807 | hci_dev_unlock(hdev); |
466 | hci_dev_put(hdev); | 808 | hci_dev_put(hdev); |
467 | 809 | ||
468 | return err; | 810 | return err; |
469 | } | 811 | } |
470 | 812 | ||
471 | static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, | 813 | static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, |
472 | struct sock *skip_sk) | 814 | u16 data_len, struct sock *skip_sk) |
473 | { | 815 | { |
474 | struct sk_buff *skb; | 816 | struct sk_buff *skb; |
475 | struct mgmt_hdr *hdr; | 817 | struct mgmt_hdr *hdr; |
@@ -482,7 +824,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, | |||
482 | 824 | ||
483 | hdr = (void *) skb_put(skb, sizeof(*hdr)); | 825 | hdr = (void *) skb_put(skb, sizeof(*hdr)); |
484 | hdr->opcode = cpu_to_le16(event); | 826 | hdr->opcode = cpu_to_le16(event); |
485 | hdr->index = cpu_to_le16(index); | 827 | if (hdev) |
828 | hdr->index = cpu_to_le16(hdev->id); | ||
829 | else | ||
830 | hdr->index = cpu_to_le16(MGMT_INDEX_NONE); | ||
486 | hdr->len = cpu_to_le16(data_len); | 831 | hdr->len = cpu_to_le16(data_len); |
487 | 832 | ||
488 | if (data) | 833 | if (data) |
@@ -494,20 +839,12 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, | |||
494 | return 0; | 839 | return 0; |
495 | } | 840 | } |
496 | 841 | ||
497 | static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val) | ||
498 | { | ||
499 | struct mgmt_mode rp; | ||
500 | |||
501 | rp.val = val; | ||
502 | |||
503 | return cmd_complete(sk, index, opcode, &rp, sizeof(rp)); | ||
504 | } | ||
505 | |||
506 | static int set_pairable(struct sock *sk, u16 index, unsigned char *data, | 842 | static int set_pairable(struct sock *sk, u16 index, unsigned char *data, |
507 | u16 len) | 843 | u16 len) |
508 | { | 844 | { |
509 | struct mgmt_mode *cp, ev; | 845 | struct mgmt_mode *cp; |
510 | struct hci_dev *hdev; | 846 | struct hci_dev *hdev; |
847 | __le32 ev; | ||
511 | int err; | 848 | int err; |
512 | 849 | ||
513 | cp = (void *) data; | 850 | cp = (void *) data; |
@@ -515,211 +852,36 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data, | |||
515 | BT_DBG("request for hci%u", index); | 852 | BT_DBG("request for hci%u", index); |
516 | 853 | ||
517 | if (len != sizeof(*cp)) | 854 | if (len != sizeof(*cp)) |
518 | return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL); | 855 | return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, |
856 | MGMT_STATUS_INVALID_PARAMS); | ||
519 | 857 | ||
520 | hdev = hci_dev_get(index); | 858 | hdev = hci_dev_get(index); |
521 | if (!hdev) | 859 | if (!hdev) |
522 | return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); | 860 | return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, |
861 | MGMT_STATUS_INVALID_PARAMS); | ||
523 | 862 | ||
524 | hci_dev_lock_bh(hdev); | 863 | hci_dev_lock(hdev); |
525 | 864 | ||
526 | if (cp->val) | 865 | if (cp->val) |
527 | set_bit(HCI_PAIRABLE, &hdev->flags); | 866 | set_bit(HCI_PAIRABLE, &hdev->flags); |
528 | else | 867 | else |
529 | clear_bit(HCI_PAIRABLE, &hdev->flags); | 868 | clear_bit(HCI_PAIRABLE, &hdev->flags); |
530 | 869 | ||
531 | err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val); | 870 | err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev); |
532 | if (err < 0) | 871 | if (err < 0) |
533 | goto failed; | 872 | goto failed; |
534 | 873 | ||
535 | ev.val = cp->val; | 874 | ev = cpu_to_le32(get_current_settings(hdev)); |
536 | 875 | ||
537 | err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); | 876 | err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk); |
538 | 877 | ||
539 | failed: | 878 | failed: |
540 | hci_dev_unlock_bh(hdev); | 879 | hci_dev_unlock(hdev); |
541 | hci_dev_put(hdev); | 880 | hci_dev_put(hdev); |
542 | 881 | ||
543 | return err; | 882 | return err; |
544 | } | 883 | } |
545 | 884 | ||
546 | #define EIR_FLAGS 0x01 /* flags */ | ||
547 | #define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ | ||
548 | #define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ | ||
549 | #define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ | ||
550 | #define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ | ||
551 | #define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ | ||
552 | #define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ | ||
553 | #define EIR_NAME_SHORT 0x08 /* shortened local name */ | ||
554 | #define EIR_NAME_COMPLETE 0x09 /* complete local name */ | ||
555 | #define EIR_TX_POWER 0x0A /* transmit power level */ | ||
556 | #define EIR_DEVICE_ID 0x10 /* device ID */ | ||
557 | |||
558 | #define PNP_INFO_SVCLASS_ID 0x1200 | ||
559 | |||
560 | static u8 bluetooth_base_uuid[] = { | ||
561 | 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, | ||
562 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
563 | }; | ||
564 | |||
565 | static u16 get_uuid16(u8 *uuid128) | ||
566 | { | ||
567 | u32 val; | ||
568 | int i; | ||
569 | |||
570 | for (i = 0; i < 12; i++) { | ||
571 | if (bluetooth_base_uuid[i] != uuid128[i]) | ||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | memcpy(&val, &uuid128[12], 4); | ||
576 | |||
577 | val = le32_to_cpu(val); | ||
578 | if (val > 0xffff) | ||
579 | return 0; | ||
580 | |||
581 | return (u16) val; | ||
582 | } | ||
583 | |||
584 | static void create_eir(struct hci_dev *hdev, u8 *data) | ||
585 | { | ||
586 | u8 *ptr = data; | ||
587 | u16 eir_len = 0; | ||
588 | u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; | ||
589 | int i, truncated = 0; | ||
590 | struct list_head *p; | ||
591 | size_t name_len; | ||
592 | |||
593 | name_len = strlen(hdev->dev_name); | ||
594 | |||
595 | if (name_len > 0) { | ||
596 | /* EIR Data type */ | ||
597 | if (name_len > 48) { | ||
598 | name_len = 48; | ||
599 | ptr[1] = EIR_NAME_SHORT; | ||
600 | } else | ||
601 | ptr[1] = EIR_NAME_COMPLETE; | ||
602 | |||
603 | /* EIR Data length */ | ||
604 | ptr[0] = name_len + 1; | ||
605 | |||
606 | memcpy(ptr + 2, hdev->dev_name, name_len); | ||
607 | |||
608 | eir_len += (name_len + 2); | ||
609 | ptr += (name_len + 2); | ||
610 | } | ||
611 | |||
612 | memset(uuid16_list, 0, sizeof(uuid16_list)); | ||
613 | |||
614 | /* Group all UUID16 types */ | ||
615 | list_for_each(p, &hdev->uuids) { | ||
616 | struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); | ||
617 | u16 uuid16; | ||
618 | |||
619 | uuid16 = get_uuid16(uuid->uuid); | ||
620 | if (uuid16 == 0) | ||
621 | return; | ||
622 | |||
623 | if (uuid16 < 0x1100) | ||
624 | continue; | ||
625 | |||
626 | if (uuid16 == PNP_INFO_SVCLASS_ID) | ||
627 | continue; | ||
628 | |||
629 | /* Stop if not enough space to put next UUID */ | ||
630 | if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) { | ||
631 | truncated = 1; | ||
632 | break; | ||
633 | } | ||
634 | |||
635 | /* Check for duplicates */ | ||
636 | for (i = 0; uuid16_list[i] != 0; i++) | ||
637 | if (uuid16_list[i] == uuid16) | ||
638 | break; | ||
639 | |||
640 | if (uuid16_list[i] == 0) { | ||
641 | uuid16_list[i] = uuid16; | ||
642 | eir_len += sizeof(u16); | ||
643 | } | ||
644 | } | ||
645 | |||
646 | if (uuid16_list[0] != 0) { | ||
647 | u8 *length = ptr; | ||
648 | |||
649 | /* EIR Data type */ | ||
650 | ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL; | ||
651 | |||
652 | ptr += 2; | ||
653 | eir_len += 2; | ||
654 | |||
655 | for (i = 0; uuid16_list[i] != 0; i++) { | ||
656 | *ptr++ = (uuid16_list[i] & 0x00ff); | ||
657 | *ptr++ = (uuid16_list[i] & 0xff00) >> 8; | ||
658 | } | ||
659 | |||
660 | /* EIR Data length */ | ||
661 | *length = (i * sizeof(u16)) + 1; | ||
662 | } | ||
663 | } | ||
664 | |||
665 | static int update_eir(struct hci_dev *hdev) | ||
666 | { | ||
667 | struct hci_cp_write_eir cp; | ||
668 | |||
669 | if (!(hdev->features[6] & LMP_EXT_INQ)) | ||
670 | return 0; | ||
671 | |||
672 | if (hdev->ssp_mode == 0) | ||
673 | return 0; | ||
674 | |||
675 | if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
676 | return 0; | ||
677 | |||
678 | memset(&cp, 0, sizeof(cp)); | ||
679 | |||
680 | create_eir(hdev, cp.data); | ||
681 | |||
682 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) | ||
683 | return 0; | ||
684 | |||
685 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); | ||
686 | |||
687 | return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | ||
688 | } | ||
689 | |||
690 | static u8 get_service_classes(struct hci_dev *hdev) | ||
691 | { | ||
692 | struct list_head *p; | ||
693 | u8 val = 0; | ||
694 | |||
695 | list_for_each(p, &hdev->uuids) { | ||
696 | struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); | ||
697 | |||
698 | val |= uuid->svc_hint; | ||
699 | } | ||
700 | |||
701 | return val; | ||
702 | } | ||
703 | |||
704 | static int update_class(struct hci_dev *hdev) | ||
705 | { | ||
706 | u8 cod[3]; | ||
707 | |||
708 | BT_DBG("%s", hdev->name); | ||
709 | |||
710 | if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) | ||
711 | return 0; | ||
712 | |||
713 | cod[0] = hdev->minor_class; | ||
714 | cod[1] = hdev->major_class; | ||
715 | cod[2] = get_service_classes(hdev); | ||
716 | |||
717 | if (memcmp(cod, hdev->dev_class, 3) == 0) | ||
718 | return 0; | ||
719 | |||
720 | return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); | ||
721 | } | ||
722 | |||
723 | static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | 885 | static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) |
724 | { | 886 | { |
725 | struct mgmt_cp_add_uuid *cp; | 887 | struct mgmt_cp_add_uuid *cp; |
@@ -732,13 +894,15 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
732 | BT_DBG("request for hci%u", index); | 894 | BT_DBG("request for hci%u", index); |
733 | 895 | ||
734 | if (len != sizeof(*cp)) | 896 | if (len != sizeof(*cp)) |
735 | return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL); | 897 | return cmd_status(sk, index, MGMT_OP_ADD_UUID, |
898 | MGMT_STATUS_INVALID_PARAMS); | ||
736 | 899 | ||
737 | hdev = hci_dev_get(index); | 900 | hdev = hci_dev_get(index); |
738 | if (!hdev) | 901 | if (!hdev) |
739 | return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); | 902 | return cmd_status(sk, index, MGMT_OP_ADD_UUID, |
903 | MGMT_STATUS_INVALID_PARAMS); | ||
740 | 904 | ||
741 | hci_dev_lock_bh(hdev); | 905 | hci_dev_lock(hdev); |
742 | 906 | ||
743 | uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); | 907 | uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); |
744 | if (!uuid) { | 908 | if (!uuid) { |
@@ -762,7 +926,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
762 | err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); | 926 | err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); |
763 | 927 | ||
764 | failed: | 928 | failed: |
765 | hci_dev_unlock_bh(hdev); | 929 | hci_dev_unlock(hdev); |
766 | hci_dev_put(hdev); | 930 | hci_dev_put(hdev); |
767 | 931 | ||
768 | return err; | 932 | return err; |
@@ -781,13 +945,15 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
781 | BT_DBG("request for hci%u", index); | 945 | BT_DBG("request for hci%u", index); |
782 | 946 | ||
783 | if (len != sizeof(*cp)) | 947 | if (len != sizeof(*cp)) |
784 | return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL); | 948 | return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, |
949 | MGMT_STATUS_INVALID_PARAMS); | ||
785 | 950 | ||
786 | hdev = hci_dev_get(index); | 951 | hdev = hci_dev_get(index); |
787 | if (!hdev) | 952 | if (!hdev) |
788 | return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); | 953 | return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, |
954 | MGMT_STATUS_INVALID_PARAMS); | ||
789 | 955 | ||
790 | hci_dev_lock_bh(hdev); | 956 | hci_dev_lock(hdev); |
791 | 957 | ||
792 | if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { | 958 | if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { |
793 | err = hci_uuids_clear(hdev); | 959 | err = hci_uuids_clear(hdev); |
@@ -807,7 +973,8 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
807 | } | 973 | } |
808 | 974 | ||
809 | if (found == 0) { | 975 | if (found == 0) { |
810 | err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT); | 976 | err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, |
977 | MGMT_STATUS_INVALID_PARAMS); | ||
811 | goto unlock; | 978 | goto unlock; |
812 | } | 979 | } |
813 | 980 | ||
@@ -822,7 +989,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
822 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); | 989 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); |
823 | 990 | ||
824 | unlock: | 991 | unlock: |
825 | hci_dev_unlock_bh(hdev); | 992 | hci_dev_unlock(hdev); |
826 | hci_dev_put(hdev); | 993 | hci_dev_put(hdev); |
827 | 994 | ||
828 | return err; | 995 | return err; |
@@ -840,97 +1007,71 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, | |||
840 | BT_DBG("request for hci%u", index); | 1007 | BT_DBG("request for hci%u", index); |
841 | 1008 | ||
842 | if (len != sizeof(*cp)) | 1009 | if (len != sizeof(*cp)) |
843 | return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL); | 1010 | return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, |
1011 | MGMT_STATUS_INVALID_PARAMS); | ||
844 | 1012 | ||
845 | hdev = hci_dev_get(index); | 1013 | hdev = hci_dev_get(index); |
846 | if (!hdev) | 1014 | if (!hdev) |
847 | return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); | 1015 | return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, |
1016 | MGMT_STATUS_INVALID_PARAMS); | ||
848 | 1017 | ||
849 | hci_dev_lock_bh(hdev); | 1018 | hci_dev_lock(hdev); |
850 | 1019 | ||
851 | hdev->major_class = cp->major; | 1020 | hdev->major_class = cp->major; |
852 | hdev->minor_class = cp->minor; | 1021 | hdev->minor_class = cp->minor; |
853 | 1022 | ||
1023 | if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) { | ||
1024 | hci_dev_unlock(hdev); | ||
1025 | cancel_delayed_work_sync(&hdev->service_cache); | ||
1026 | hci_dev_lock(hdev); | ||
1027 | update_eir(hdev); | ||
1028 | } | ||
1029 | |||
854 | err = update_class(hdev); | 1030 | err = update_class(hdev); |
855 | 1031 | ||
856 | if (err == 0) | 1032 | if (err == 0) |
857 | err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); | 1033 | err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); |
858 | 1034 | ||
859 | hci_dev_unlock_bh(hdev); | 1035 | hci_dev_unlock(hdev); |
860 | hci_dev_put(hdev); | ||
861 | |||
862 | return err; | ||
863 | } | ||
864 | |||
865 | static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, | ||
866 | u16 len) | ||
867 | { | ||
868 | struct hci_dev *hdev; | ||
869 | struct mgmt_cp_set_service_cache *cp; | ||
870 | int err; | ||
871 | |||
872 | cp = (void *) data; | ||
873 | |||
874 | if (len != sizeof(*cp)) | ||
875 | return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL); | ||
876 | |||
877 | hdev = hci_dev_get(index); | ||
878 | if (!hdev) | ||
879 | return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); | ||
880 | |||
881 | hci_dev_lock_bh(hdev); | ||
882 | |||
883 | BT_DBG("hci%u enable %d", index, cp->enable); | ||
884 | |||
885 | if (cp->enable) { | ||
886 | set_bit(HCI_SERVICE_CACHE, &hdev->flags); | ||
887 | err = 0; | ||
888 | } else { | ||
889 | clear_bit(HCI_SERVICE_CACHE, &hdev->flags); | ||
890 | err = update_class(hdev); | ||
891 | if (err == 0) | ||
892 | err = update_eir(hdev); | ||
893 | } | ||
894 | |||
895 | if (err == 0) | ||
896 | err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, | ||
897 | 0); | ||
898 | |||
899 | hci_dev_unlock_bh(hdev); | ||
900 | hci_dev_put(hdev); | 1036 | hci_dev_put(hdev); |
901 | 1037 | ||
902 | return err; | 1038 | return err; |
903 | } | 1039 | } |
904 | 1040 | ||
905 | static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) | 1041 | static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, |
1042 | u16 len) | ||
906 | { | 1043 | { |
907 | struct hci_dev *hdev; | 1044 | struct hci_dev *hdev; |
908 | struct mgmt_cp_load_keys *cp; | 1045 | struct mgmt_cp_load_link_keys *cp; |
909 | u16 key_count, expected_len; | 1046 | u16 key_count, expected_len; |
910 | int i; | 1047 | int i; |
911 | 1048 | ||
912 | cp = (void *) data; | 1049 | cp = (void *) data; |
913 | 1050 | ||
914 | if (len < sizeof(*cp)) | 1051 | if (len < sizeof(*cp)) |
915 | return -EINVAL; | 1052 | return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, |
1053 | MGMT_STATUS_INVALID_PARAMS); | ||
916 | 1054 | ||
917 | key_count = get_unaligned_le16(&cp->key_count); | 1055 | key_count = get_unaligned_le16(&cp->key_count); |
918 | 1056 | ||
919 | expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); | 1057 | expected_len = sizeof(*cp) + key_count * |
1058 | sizeof(struct mgmt_link_key_info); | ||
920 | if (expected_len != len) { | 1059 | if (expected_len != len) { |
921 | BT_ERR("load_keys: expected %u bytes, got %u bytes", | 1060 | BT_ERR("load_link_keys: expected %u bytes, got %u bytes", |
922 | len, expected_len); | 1061 | len, expected_len); |
923 | return -EINVAL; | 1062 | return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, |
1063 | MGMT_STATUS_INVALID_PARAMS); | ||
924 | } | 1064 | } |
925 | 1065 | ||
926 | hdev = hci_dev_get(index); | 1066 | hdev = hci_dev_get(index); |
927 | if (!hdev) | 1067 | if (!hdev) |
928 | return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); | 1068 | return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, |
1069 | MGMT_STATUS_INVALID_PARAMS); | ||
929 | 1070 | ||
930 | BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, | 1071 | BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, |
931 | key_count); | 1072 | key_count); |
932 | 1073 | ||
933 | hci_dev_lock_bh(hdev); | 1074 | hci_dev_lock(hdev); |
934 | 1075 | ||
935 | hci_link_keys_clear(hdev); | 1076 | hci_link_keys_clear(hdev); |
936 | 1077 | ||
@@ -942,58 +1083,84 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
942 | clear_bit(HCI_DEBUG_KEYS, &hdev->flags); | 1083 | clear_bit(HCI_DEBUG_KEYS, &hdev->flags); |
943 | 1084 | ||
944 | for (i = 0; i < key_count; i++) { | 1085 | for (i = 0; i < key_count; i++) { |
945 | struct mgmt_key_info *key = &cp->keys[i]; | 1086 | struct mgmt_link_key_info *key = &cp->keys[i]; |
946 | 1087 | ||
947 | hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, | 1088 | hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, |
948 | key->pin_len); | 1089 | key->pin_len); |
949 | } | 1090 | } |
950 | 1091 | ||
951 | hci_dev_unlock_bh(hdev); | 1092 | cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0); |
1093 | |||
1094 | hci_dev_unlock(hdev); | ||
952 | hci_dev_put(hdev); | 1095 | hci_dev_put(hdev); |
953 | 1096 | ||
954 | return 0; | 1097 | return 0; |
955 | } | 1098 | } |
956 | 1099 | ||
957 | static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) | 1100 | static int remove_keys(struct sock *sk, u16 index, unsigned char *data, |
1101 | u16 len) | ||
958 | { | 1102 | { |
959 | struct hci_dev *hdev; | 1103 | struct hci_dev *hdev; |
960 | struct mgmt_cp_remove_key *cp; | 1104 | struct mgmt_cp_remove_keys *cp; |
1105 | struct mgmt_rp_remove_keys rp; | ||
1106 | struct hci_cp_disconnect dc; | ||
1107 | struct pending_cmd *cmd; | ||
961 | struct hci_conn *conn; | 1108 | struct hci_conn *conn; |
962 | int err; | 1109 | int err; |
963 | 1110 | ||
964 | cp = (void *) data; | 1111 | cp = (void *) data; |
965 | 1112 | ||
966 | if (len != sizeof(*cp)) | 1113 | if (len != sizeof(*cp)) |
967 | return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); | 1114 | return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, |
1115 | MGMT_STATUS_INVALID_PARAMS); | ||
968 | 1116 | ||
969 | hdev = hci_dev_get(index); | 1117 | hdev = hci_dev_get(index); |
970 | if (!hdev) | 1118 | if (!hdev) |
971 | return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); | 1119 | return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, |
1120 | MGMT_STATUS_INVALID_PARAMS); | ||
972 | 1121 | ||
973 | hci_dev_lock_bh(hdev); | 1122 | hci_dev_lock(hdev); |
1123 | |||
1124 | memset(&rp, 0, sizeof(rp)); | ||
1125 | bacpy(&rp.bdaddr, &cp->bdaddr); | ||
1126 | rp.status = MGMT_STATUS_FAILED; | ||
974 | 1127 | ||
975 | err = hci_remove_link_key(hdev, &cp->bdaddr); | 1128 | err = hci_remove_link_key(hdev, &cp->bdaddr); |
976 | if (err < 0) { | 1129 | if (err < 0) { |
977 | err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); | 1130 | rp.status = MGMT_STATUS_NOT_PAIRED; |
978 | goto unlock; | 1131 | goto unlock; |
979 | } | 1132 | } |
980 | 1133 | ||
981 | err = 0; | 1134 | if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) { |
982 | 1135 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, | |
983 | if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) | 1136 | sizeof(rp)); |
984 | goto unlock; | 1137 | goto unlock; |
1138 | } | ||
985 | 1139 | ||
986 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); | 1140 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); |
987 | if (conn) { | 1141 | if (!conn) { |
988 | struct hci_cp_disconnect dc; | 1142 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, |
1143 | sizeof(rp)); | ||
1144 | goto unlock; | ||
1145 | } | ||
989 | 1146 | ||
990 | put_unaligned_le16(conn->handle, &dc.handle); | 1147 | cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp)); |
991 | dc.reason = 0x13; /* Remote User Terminated Connection */ | 1148 | if (!cmd) { |
992 | err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); | 1149 | err = -ENOMEM; |
1150 | goto unlock; | ||
993 | } | 1151 | } |
994 | 1152 | ||
1153 | put_unaligned_le16(conn->handle, &dc.handle); | ||
1154 | dc.reason = 0x13; /* Remote User Terminated Connection */ | ||
1155 | err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); | ||
1156 | if (err < 0) | ||
1157 | mgmt_pending_remove(cmd); | ||
1158 | |||
995 | unlock: | 1159 | unlock: |
996 | hci_dev_unlock_bh(hdev); | 1160 | if (err < 0) |
1161 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, | ||
1162 | sizeof(rp)); | ||
1163 | hci_dev_unlock(hdev); | ||
997 | hci_dev_put(hdev); | 1164 | hci_dev_put(hdev); |
998 | 1165 | ||
999 | return err; | 1166 | return err; |
@@ -1013,21 +1180,25 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1013 | cp = (void *) data; | 1180 | cp = (void *) data; |
1014 | 1181 | ||
1015 | if (len != sizeof(*cp)) | 1182 | if (len != sizeof(*cp)) |
1016 | return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL); | 1183 | return cmd_status(sk, index, MGMT_OP_DISCONNECT, |
1184 | MGMT_STATUS_INVALID_PARAMS); | ||
1017 | 1185 | ||
1018 | hdev = hci_dev_get(index); | 1186 | hdev = hci_dev_get(index); |
1019 | if (!hdev) | 1187 | if (!hdev) |
1020 | return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); | 1188 | return cmd_status(sk, index, MGMT_OP_DISCONNECT, |
1189 | MGMT_STATUS_INVALID_PARAMS); | ||
1021 | 1190 | ||
1022 | hci_dev_lock_bh(hdev); | 1191 | hci_dev_lock(hdev); |
1023 | 1192 | ||
1024 | if (!test_bit(HCI_UP, &hdev->flags)) { | 1193 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1025 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); | 1194 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, |
1195 | MGMT_STATUS_NOT_POWERED); | ||
1026 | goto failed; | 1196 | goto failed; |
1027 | } | 1197 | } |
1028 | 1198 | ||
1029 | if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { | 1199 | if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { |
1030 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); | 1200 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, |
1201 | MGMT_STATUS_BUSY); | ||
1031 | goto failed; | 1202 | goto failed; |
1032 | } | 1203 | } |
1033 | 1204 | ||
@@ -1036,11 +1207,12 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1036 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); | 1207 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); |
1037 | 1208 | ||
1038 | if (!conn) { | 1209 | if (!conn) { |
1039 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN); | 1210 | err = cmd_status(sk, index, MGMT_OP_DISCONNECT, |
1211 | MGMT_STATUS_NOT_CONNECTED); | ||
1040 | goto failed; | 1212 | goto failed; |
1041 | } | 1213 | } |
1042 | 1214 | ||
1043 | cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); | 1215 | cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); |
1044 | if (!cmd) { | 1216 | if (!cmd) { |
1045 | err = -ENOMEM; | 1217 | err = -ENOMEM; |
1046 | goto failed; | 1218 | goto failed; |
@@ -1054,16 +1226,36 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1054 | mgmt_pending_remove(cmd); | 1226 | mgmt_pending_remove(cmd); |
1055 | 1227 | ||
1056 | failed: | 1228 | failed: |
1057 | hci_dev_unlock_bh(hdev); | 1229 | hci_dev_unlock(hdev); |
1058 | hci_dev_put(hdev); | 1230 | hci_dev_put(hdev); |
1059 | 1231 | ||
1060 | return err; | 1232 | return err; |
1061 | } | 1233 | } |
1062 | 1234 | ||
1235 | static u8 link_to_mgmt(u8 link_type, u8 addr_type) | ||
1236 | { | ||
1237 | switch (link_type) { | ||
1238 | case LE_LINK: | ||
1239 | switch (addr_type) { | ||
1240 | case ADDR_LE_DEV_PUBLIC: | ||
1241 | return MGMT_ADDR_LE_PUBLIC; | ||
1242 | case ADDR_LE_DEV_RANDOM: | ||
1243 | return MGMT_ADDR_LE_RANDOM; | ||
1244 | default: | ||
1245 | return MGMT_ADDR_INVALID; | ||
1246 | } | ||
1247 | case ACL_LINK: | ||
1248 | return MGMT_ADDR_BREDR; | ||
1249 | default: | ||
1250 | return MGMT_ADDR_INVALID; | ||
1251 | } | ||
1252 | } | ||
1253 | |||
1063 | static int get_connections(struct sock *sk, u16 index) | 1254 | static int get_connections(struct sock *sk, u16 index) |
1064 | { | 1255 | { |
1065 | struct mgmt_rp_get_connections *rp; | 1256 | struct mgmt_rp_get_connections *rp; |
1066 | struct hci_dev *hdev; | 1257 | struct hci_dev *hdev; |
1258 | struct hci_conn *c; | ||
1067 | struct list_head *p; | 1259 | struct list_head *p; |
1068 | size_t rp_len; | 1260 | size_t rp_len; |
1069 | u16 count; | 1261 | u16 count; |
@@ -1073,16 +1265,17 @@ static int get_connections(struct sock *sk, u16 index) | |||
1073 | 1265 | ||
1074 | hdev = hci_dev_get(index); | 1266 | hdev = hci_dev_get(index); |
1075 | if (!hdev) | 1267 | if (!hdev) |
1076 | return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); | 1268 | return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, |
1269 | MGMT_STATUS_INVALID_PARAMS); | ||
1077 | 1270 | ||
1078 | hci_dev_lock_bh(hdev); | 1271 | hci_dev_lock(hdev); |
1079 | 1272 | ||
1080 | count = 0; | 1273 | count = 0; |
1081 | list_for_each(p, &hdev->conn_hash.list) { | 1274 | list_for_each(p, &hdev->conn_hash.list) { |
1082 | count++; | 1275 | count++; |
1083 | } | 1276 | } |
1084 | 1277 | ||
1085 | rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t)); | 1278 | rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info)); |
1086 | rp = kmalloc(rp_len, GFP_ATOMIC); | 1279 | rp = kmalloc(rp_len, GFP_ATOMIC); |
1087 | if (!rp) { | 1280 | if (!rp) { |
1088 | err = -ENOMEM; | 1281 | err = -ENOMEM; |
@@ -1092,17 +1285,22 @@ static int get_connections(struct sock *sk, u16 index) | |||
1092 | put_unaligned_le16(count, &rp->conn_count); | 1285 | put_unaligned_le16(count, &rp->conn_count); |
1093 | 1286 | ||
1094 | i = 0; | 1287 | i = 0; |
1095 | list_for_each(p, &hdev->conn_hash.list) { | 1288 | list_for_each_entry(c, &hdev->conn_hash.list, list) { |
1096 | struct hci_conn *c = list_entry(p, struct hci_conn, list); | 1289 | bacpy(&rp->addr[i].bdaddr, &c->dst); |
1097 | 1290 | rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); | |
1098 | bacpy(&rp->conn[i++], &c->dst); | 1291 | if (rp->addr[i].type == MGMT_ADDR_INVALID) |
1292 | continue; | ||
1293 | i++; | ||
1099 | } | 1294 | } |
1100 | 1295 | ||
1296 | /* Recalculate length in case of filtered SCO connections, etc */ | ||
1297 | rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); | ||
1298 | |||
1101 | err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); | 1299 | err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); |
1102 | 1300 | ||
1103 | unlock: | 1301 | unlock: |
1104 | kfree(rp); | 1302 | kfree(rp); |
1105 | hci_dev_unlock_bh(hdev); | 1303 | hci_dev_unlock(hdev); |
1106 | hci_dev_put(hdev); | 1304 | hci_dev_put(hdev); |
1107 | return err; | 1305 | return err; |
1108 | } | 1306 | } |
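
For illustration, a small standalone sketch of the two ideas in the preceding hunks: connection addresses are now reported as (bdaddr, type) pairs via the link-type/address-type mapping, and entries that do not map to a valid management address type (SCO links, for instance) are skipped, so the reply length is recomputed from the number of entries actually copied. The constants and structs are simplified stand-ins for the mgmt_addr_info definitions.

    #include <stdint.h>
    #include <string.h>

    enum {
            SKETCH_ADDR_BREDR     = 0,
            SKETCH_ADDR_LE_PUBLIC = 1,
            SKETCH_ADDR_LE_RANDOM = 2,
            SKETCH_ADDR_INVALID   = 0xff,
    };

    enum { SKETCH_ACL_LINK, SKETCH_SCO_LINK, SKETCH_LE_LINK };
    enum { SKETCH_LE_PUBLIC, SKETCH_LE_RANDOM };

    struct sketch_conn {
            uint8_t type;           /* link type */
            uint8_t dst_type;       /* LE address type */
            uint8_t dst[6];         /* bdaddr */
    };

    struct sketch_addr_info {
            uint8_t bdaddr[6];
            uint8_t type;
    };

    static uint8_t sketch_link_to_mgmt(uint8_t link_type, uint8_t addr_type)
    {
            switch (link_type) {
            case SKETCH_LE_LINK:
                    return addr_type == SKETCH_LE_PUBLIC ? SKETCH_ADDR_LE_PUBLIC :
                           addr_type == SKETCH_LE_RANDOM ? SKETCH_ADDR_LE_RANDOM :
                                                           SKETCH_ADDR_INVALID;
            case SKETCH_ACL_LINK:
                    return SKETCH_ADDR_BREDR;
            default:                /* SCO/eSCO and unknown types are dropped */
                    return SKETCH_ADDR_INVALID;
            }
    }

    /* Copy only the connections that map to a valid address type and return
     * how many were kept -- the caller sizes the reply from this count. */
    static unsigned int sketch_fill_connections(const struct sketch_conn *conns,
                                                unsigned int count,
                                                struct sketch_addr_info *out)
    {
            unsigned int i, n = 0;

            for (i = 0; i < count; i++) {
                    uint8_t type = sketch_link_to_mgmt(conns[i].type,
                                                       conns[i].dst_type);
                    if (type == SKETCH_ADDR_INVALID)
                            continue;
                    memcpy(out[n].bdaddr, conns[i].dst, 6);
                    out[n].type = type;
                    n++;
            }

            return n;
    }
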
@@ -1113,7 +1311,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index, | |||
1113 | struct pending_cmd *cmd; | 1311 | struct pending_cmd *cmd; |
1114 | int err; | 1312 | int err; |
1115 | 1313 | ||
1116 | cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp, | 1314 | cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, |
1117 | sizeof(*cp)); | 1315 | sizeof(*cp)); |
1118 | if (!cmd) | 1316 | if (!cmd) |
1119 | return -ENOMEM; | 1317 | return -ENOMEM; |
@@ -1142,22 +1340,26 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, | |||
1142 | cp = (void *) data; | 1340 | cp = (void *) data; |
1143 | 1341 | ||
1144 | if (len != sizeof(*cp)) | 1342 | if (len != sizeof(*cp)) |
1145 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL); | 1343 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, |
1344 | MGMT_STATUS_INVALID_PARAMS); | ||
1146 | 1345 | ||
1147 | hdev = hci_dev_get(index); | 1346 | hdev = hci_dev_get(index); |
1148 | if (!hdev) | 1347 | if (!hdev) |
1149 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); | 1348 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, |
1349 | MGMT_STATUS_INVALID_PARAMS); | ||
1150 | 1350 | ||
1151 | hci_dev_lock_bh(hdev); | 1351 | hci_dev_lock(hdev); |
1152 | 1352 | ||
1153 | if (!test_bit(HCI_UP, &hdev->flags)) { | 1353 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1154 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); | 1354 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, |
1355 | MGMT_STATUS_NOT_POWERED); | ||
1155 | goto failed; | 1356 | goto failed; |
1156 | } | 1357 | } |
1157 | 1358 | ||
1158 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); | 1359 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); |
1159 | if (!conn) { | 1360 | if (!conn) { |
1160 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN); | 1361 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, |
1362 | MGMT_STATUS_NOT_CONNECTED); | ||
1161 | goto failed; | 1363 | goto failed; |
1162 | } | 1364 | } |
1163 | 1365 | ||
@@ -1169,12 +1371,12 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, | |||
1169 | err = send_pin_code_neg_reply(sk, index, hdev, &ncp); | 1371 | err = send_pin_code_neg_reply(sk, index, hdev, &ncp); |
1170 | if (err >= 0) | 1372 | if (err >= 0) |
1171 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, | 1373 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, |
1172 | EINVAL); | 1374 | MGMT_STATUS_INVALID_PARAMS); |
1173 | 1375 | ||
1174 | goto failed; | 1376 | goto failed; |
1175 | } | 1377 | } |
1176 | 1378 | ||
1177 | cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); | 1379 | cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); |
1178 | if (!cmd) { | 1380 | if (!cmd) { |
1179 | err = -ENOMEM; | 1381 | err = -ENOMEM; |
1180 | goto failed; | 1382 | goto failed; |
@@ -1189,7 +1391,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, | |||
1189 | mgmt_pending_remove(cmd); | 1391 | mgmt_pending_remove(cmd); |
1190 | 1392 | ||
1191 | failed: | 1393 | failed: |
1192 | hci_dev_unlock_bh(hdev); | 1394 | hci_dev_unlock(hdev); |
1193 | hci_dev_put(hdev); | 1395 | hci_dev_put(hdev); |
1194 | 1396 | ||
1195 | return err; | 1397 | return err; |
@@ -1208,25 +1410,25 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, | |||
1208 | 1410 | ||
1209 | if (len != sizeof(*cp)) | 1411 | if (len != sizeof(*cp)) |
1210 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, | 1412 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, |
1211 | EINVAL); | 1413 | MGMT_STATUS_INVALID_PARAMS); |
1212 | 1414 | ||
1213 | hdev = hci_dev_get(index); | 1415 | hdev = hci_dev_get(index); |
1214 | if (!hdev) | 1416 | if (!hdev) |
1215 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, | 1417 | return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, |
1216 | ENODEV); | 1418 | MGMT_STATUS_INVALID_PARAMS); |
1217 | 1419 | ||
1218 | hci_dev_lock_bh(hdev); | 1420 | hci_dev_lock(hdev); |
1219 | 1421 | ||
1220 | if (!test_bit(HCI_UP, &hdev->flags)) { | 1422 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1221 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, | 1423 | err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, |
1222 | ENETDOWN); | 1424 | MGMT_STATUS_NOT_POWERED); |
1223 | goto failed; | 1425 | goto failed; |
1224 | } | 1426 | } |
1225 | 1427 | ||
1226 | err = send_pin_code_neg_reply(sk, index, hdev, cp); | 1428 | err = send_pin_code_neg_reply(sk, index, hdev, cp); |
1227 | 1429 | ||
1228 | failed: | 1430 | failed: |
1229 | hci_dev_unlock_bh(hdev); | 1431 | hci_dev_unlock(hdev); |
1230 | hci_dev_put(hdev); | 1432 | hci_dev_put(hdev); |
1231 | 1433 | ||
1232 | return err; | 1434 | return err; |
@@ -1243,20 +1445,22 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, | |||
1243 | cp = (void *) data; | 1445 | cp = (void *) data; |
1244 | 1446 | ||
1245 | if (len != sizeof(*cp)) | 1447 | if (len != sizeof(*cp)) |
1246 | return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL); | 1448 | return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, |
1449 | MGMT_STATUS_INVALID_PARAMS); | ||
1247 | 1450 | ||
1248 | hdev = hci_dev_get(index); | 1451 | hdev = hci_dev_get(index); |
1249 | if (!hdev) | 1452 | if (!hdev) |
1250 | return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); | 1453 | return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, |
1454 | MGMT_STATUS_INVALID_PARAMS); | ||
1251 | 1455 | ||
1252 | hci_dev_lock_bh(hdev); | 1456 | hci_dev_lock(hdev); |
1253 | 1457 | ||
1254 | hdev->io_capability = cp->io_capability; | 1458 | hdev->io_capability = cp->io_capability; |
1255 | 1459 | ||
1256 | BT_DBG("%s IO capability set to 0x%02x", hdev->name, | 1460 | BT_DBG("%s IO capability set to 0x%02x", hdev->name, |
1257 | hdev->io_capability); | 1461 | hdev->io_capability); |
1258 | 1462 | ||
1259 | hci_dev_unlock_bh(hdev); | 1463 | hci_dev_unlock(hdev); |
1260 | hci_dev_put(hdev); | 1464 | hci_dev_put(hdev); |
1261 | 1465 | ||
1262 | return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); | 1466 | return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); |
@@ -1265,19 +1469,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, | |||
1265 | static inline struct pending_cmd *find_pairing(struct hci_conn *conn) | 1469 | static inline struct pending_cmd *find_pairing(struct hci_conn *conn) |
1266 | { | 1470 | { |
1267 | struct hci_dev *hdev = conn->hdev; | 1471 | struct hci_dev *hdev = conn->hdev; |
1268 | struct list_head *p; | 1472 | struct pending_cmd *cmd; |
1269 | |||
1270 | list_for_each(p, &cmd_list) { | ||
1271 | struct pending_cmd *cmd; | ||
1272 | |||
1273 | cmd = list_entry(p, struct pending_cmd, list); | ||
1274 | 1473 | ||
1474 | list_for_each_entry(cmd, &hdev->mgmt_pending, list) { | ||
1275 | if (cmd->opcode != MGMT_OP_PAIR_DEVICE) | 1475 | if (cmd->opcode != MGMT_OP_PAIR_DEVICE) |
1276 | continue; | 1476 | continue; |
1277 | 1477 | ||
1278 | if (cmd->index != hdev->id) | ||
1279 | continue; | ||
1280 | |||
1281 | if (cmd->user_data != conn) | 1478 | if (cmd->user_data != conn) |
1282 | continue; | 1479 | continue; |
1283 | 1480 | ||
@@ -1292,7 +1489,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) | |||
1292 | struct mgmt_rp_pair_device rp; | 1489 | struct mgmt_rp_pair_device rp; |
1293 | struct hci_conn *conn = cmd->user_data; | 1490 | struct hci_conn *conn = cmd->user_data; |
1294 | 1491 | ||
1295 | bacpy(&rp.bdaddr, &conn->dst); | 1492 | bacpy(&rp.addr.bdaddr, &conn->dst); |
1493 | rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); | ||
1296 | rp.status = status; | 1494 | rp.status = status; |
1297 | 1495 | ||
1298 | cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); | 1496 | cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); |
@@ -1314,20 +1512,18 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) | |||
1314 | BT_DBG("status %u", status); | 1512 | BT_DBG("status %u", status); |
1315 | 1513 | ||
1316 | cmd = find_pairing(conn); | 1514 | cmd = find_pairing(conn); |
1317 | if (!cmd) { | 1515 | if (!cmd) |
1318 | BT_DBG("Unable to find a pending command"); | 1516 | BT_DBG("Unable to find a pending command"); |
1319 | return; | 1517 | else |
1320 | } | 1518 | pairing_complete(cmd, status); |
1321 | |||
1322 | pairing_complete(cmd, status); | ||
1323 | } | 1519 | } |
1324 | 1520 | ||
1325 | static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) | 1521 | static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) |
1326 | { | 1522 | { |
1327 | struct hci_dev *hdev; | 1523 | struct hci_dev *hdev; |
1328 | struct mgmt_cp_pair_device *cp; | 1524 | struct mgmt_cp_pair_device *cp; |
1525 | struct mgmt_rp_pair_device rp; | ||
1329 | struct pending_cmd *cmd; | 1526 | struct pending_cmd *cmd; |
1330 | struct adv_entry *entry; | ||
1331 | u8 sec_level, auth_type; | 1527 | u8 sec_level, auth_type; |
1332 | struct hci_conn *conn; | 1528 | struct hci_conn *conn; |
1333 | int err; | 1529 | int err; |
@@ -1337,13 +1533,15 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1337 | cp = (void *) data; | 1533 | cp = (void *) data; |
1338 | 1534 | ||
1339 | if (len != sizeof(*cp)) | 1535 | if (len != sizeof(*cp)) |
1340 | return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL); | 1536 | return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, |
1537 | MGMT_STATUS_INVALID_PARAMS); | ||
1341 | 1538 | ||
1342 | hdev = hci_dev_get(index); | 1539 | hdev = hci_dev_get(index); |
1343 | if (!hdev) | 1540 | if (!hdev) |
1344 | return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); | 1541 | return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, |
1542 | MGMT_STATUS_INVALID_PARAMS); | ||
1345 | 1543 | ||
1346 | hci_dev_lock_bh(hdev); | 1544 | hci_dev_lock(hdev); |
1347 | 1545 | ||
1348 | sec_level = BT_SECURITY_MEDIUM; | 1546 | sec_level = BT_SECURITY_MEDIUM; |
1349 | if (cp->io_cap == 0x03) | 1547 | if (cp->io_cap == 0x03) |
@@ -1351,26 +1549,33 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1351 | else | 1549 | else |
1352 | auth_type = HCI_AT_DEDICATED_BONDING_MITM; | 1550 | auth_type = HCI_AT_DEDICATED_BONDING_MITM; |
1353 | 1551 | ||
1354 | entry = hci_find_adv_entry(hdev, &cp->bdaddr); | 1552 | if (cp->addr.type == MGMT_ADDR_BREDR) |
1355 | if (entry) | 1553 | conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, |
1356 | conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level, | ||
1357 | auth_type); | 1554 | auth_type); |
1358 | else | 1555 | else |
1359 | conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, | 1556 | conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, |
1360 | auth_type); | 1557 | auth_type); |
1361 | 1558 | ||
1559 | memset(&rp, 0, sizeof(rp)); | ||
1560 | bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); | ||
1561 | rp.addr.type = cp->addr.type; | ||
1562 | |||
1362 | if (IS_ERR(conn)) { | 1563 | if (IS_ERR(conn)) { |
1363 | err = PTR_ERR(conn); | 1564 | rp.status = -PTR_ERR(conn); |
1565 | err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, | ||
1566 | &rp, sizeof(rp)); | ||
1364 | goto unlock; | 1567 | goto unlock; |
1365 | } | 1568 | } |
1366 | 1569 | ||
1367 | if (conn->connect_cfm_cb) { | 1570 | if (conn->connect_cfm_cb) { |
1368 | hci_conn_put(conn); | 1571 | hci_conn_put(conn); |
1369 | err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY); | 1572 | rp.status = EBUSY; |
1573 | err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, | ||
1574 | &rp, sizeof(rp)); | ||
1370 | goto unlock; | 1575 | goto unlock; |
1371 | } | 1576 | } |
1372 | 1577 | ||
1373 | cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len); | 1578 | cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); |
1374 | if (!cmd) { | 1579 | if (!cmd) { |
1375 | err = -ENOMEM; | 1580 | err = -ENOMEM; |
1376 | hci_conn_put(conn); | 1581 | hci_conn_put(conn); |
@@ -1378,7 +1583,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1378 | } | 1583 | } |
1379 | 1584 | ||
1380 | /* For LE, just connecting isn't a proof that the pairing finished */ | 1585 | /* For LE, just connecting isn't a proof that the pairing finished */ |
1381 | if (!entry) | 1586 | if (cp->addr.type == MGMT_ADDR_BREDR) |
1382 | conn->connect_cfm_cb = pairing_complete_cb; | 1587 | conn->connect_cfm_cb = pairing_complete_cb; |
1383 | 1588 | ||
1384 | conn->security_cfm_cb = pairing_complete_cb; | 1589 | conn->security_cfm_cb = pairing_complete_cb; |
@@ -1393,62 +1598,151 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) | |||
1393 | err = 0; | 1598 | err = 0; |
1394 | 1599 | ||
1395 | unlock: | 1600 | unlock: |
1396 | hci_dev_unlock_bh(hdev); | 1601 | hci_dev_unlock(hdev); |
1397 | hci_dev_put(hdev); | 1602 | hci_dev_put(hdev); |
1398 | 1603 | ||
1399 | return err; | 1604 | return err; |
1400 | } | 1605 | } |
1401 | 1606 | ||
1402 | static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, | 1607 | static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr, |
1403 | u16 len, int success) | 1608 | u16 mgmt_op, u16 hci_op, __le32 passkey) |
1404 | { | 1609 | { |
1405 | struct mgmt_cp_user_confirm_reply *cp = (void *) data; | ||
1406 | u16 mgmt_op, hci_op; | ||
1407 | struct pending_cmd *cmd; | 1610 | struct pending_cmd *cmd; |
1408 | struct hci_dev *hdev; | 1611 | struct hci_dev *hdev; |
1612 | struct hci_conn *conn; | ||
1409 | int err; | 1613 | int err; |
1410 | 1614 | ||
1411 | BT_DBG(""); | 1615 | hdev = hci_dev_get(index); |
1616 | if (!hdev) | ||
1617 | return cmd_status(sk, index, mgmt_op, | ||
1618 | MGMT_STATUS_INVALID_PARAMS); | ||
1412 | 1619 | ||
1413 | if (success) { | 1620 | hci_dev_lock(hdev); |
1414 | mgmt_op = MGMT_OP_USER_CONFIRM_REPLY; | 1621 | |
1415 | hci_op = HCI_OP_USER_CONFIRM_REPLY; | 1622 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1416 | } else { | 1623 | err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED); |
1417 | mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY; | 1624 | goto done; |
1418 | hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY; | ||
1419 | } | 1625 | } |
1420 | 1626 | ||
1421 | if (len != sizeof(*cp)) | 1627 | /* |
1422 | return cmd_status(sk, index, mgmt_op, EINVAL); | 1628 | * Check for an existing ACL link, if present pair via |
1629 | * HCI commands. | ||
1630 | * | ||
1631 | * If no ACL link is present, check for an LE link and if | ||
1632 | * present, pair via the SMP engine. | ||
1633 | * | ||
1634 | * If neither ACL nor LE links are present, fail with error. | ||
1635 | */ | ||
1636 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); | ||
1637 | if (!conn) { | ||
1638 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); | ||
1639 | if (!conn) { | ||
1640 | err = cmd_status(sk, index, mgmt_op, | ||
1641 | MGMT_STATUS_NOT_CONNECTED); | ||
1642 | goto done; | ||
1643 | } | ||
1423 | 1644 | ||
1424 | hdev = hci_dev_get(index); | 1645 | /* Continue with pairing via SMP */ |
1425 | if (!hdev) | 1646 | err = smp_user_confirm_reply(conn, mgmt_op, passkey); |
1426 | return cmd_status(sk, index, mgmt_op, ENODEV); | ||
1427 | 1647 | ||
1428 | hci_dev_lock_bh(hdev); | 1648 | if (!err) |
1649 | err = cmd_status(sk, index, mgmt_op, | ||
1650 | MGMT_STATUS_SUCCESS); | ||
1651 | else | ||
1652 | err = cmd_status(sk, index, mgmt_op, | ||
1653 | MGMT_STATUS_FAILED); | ||
1429 | 1654 | ||
1430 | if (!test_bit(HCI_UP, &hdev->flags)) { | 1655 | goto done; |
1431 | err = cmd_status(sk, index, mgmt_op, ENETDOWN); | ||
1432 | goto failed; | ||
1433 | } | 1656 | } |
1434 | 1657 | ||
1435 | cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); | 1658 | cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr)); |
1436 | if (!cmd) { | 1659 | if (!cmd) { |
1437 | err = -ENOMEM; | 1660 | err = -ENOMEM; |
1438 | goto failed; | 1661 | goto done; |
1439 | } | 1662 | } |
1440 | 1663 | ||
1441 | err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr); | 1664 | /* Continue with pairing via HCI */ |
1665 | if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { | ||
1666 | struct hci_cp_user_passkey_reply cp; | ||
1667 | |||
1668 | bacpy(&cp.bdaddr, bdaddr); | ||
1669 | cp.passkey = passkey; | ||
1670 | err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); | ||
1671 | } else | ||
1672 | err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr); | ||
1673 | |||
1442 | if (err < 0) | 1674 | if (err < 0) |
1443 | mgmt_pending_remove(cmd); | 1675 | mgmt_pending_remove(cmd); |
1444 | 1676 | ||
1445 | failed: | 1677 | done: |
1446 | hci_dev_unlock_bh(hdev); | 1678 | hci_dev_unlock(hdev); |
1447 | hci_dev_put(hdev); | 1679 | hci_dev_put(hdev); |
1448 | 1680 | ||
1449 | return err; | 1681 | return err; |
1450 | } | 1682 | } |
1451 | 1683 | ||
1684 | static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len) | ||
1685 | { | ||
1686 | struct mgmt_cp_user_confirm_reply *cp = (void *) data; | ||
1687 | |||
1688 | BT_DBG(""); | ||
1689 | |||
1690 | if (len != sizeof(*cp)) | ||
1691 | return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY, | ||
1692 | MGMT_STATUS_INVALID_PARAMS); | ||
1693 | |||
1694 | return user_pairing_resp(sk, index, &cp->bdaddr, | ||
1695 | MGMT_OP_USER_CONFIRM_REPLY, | ||
1696 | HCI_OP_USER_CONFIRM_REPLY, 0); | ||
1697 | } | ||
1698 | |||
1699 | static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data, | ||
1700 | u16 len) | ||
1701 | { | ||
1702 | struct mgmt_cp_user_confirm_neg_reply *cp = data; | ||
1703 | |||
1704 | BT_DBG(""); | ||
1705 | |||
1706 | if (len != sizeof(*cp)) | ||
1707 | return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY, | ||
1708 | MGMT_STATUS_INVALID_PARAMS); | ||
1709 | |||
1710 | return user_pairing_resp(sk, index, &cp->bdaddr, | ||
1711 | MGMT_OP_USER_CONFIRM_NEG_REPLY, | ||
1712 | HCI_OP_USER_CONFIRM_NEG_REPLY, 0); | ||
1713 | } | ||
1714 | |||
1715 | static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len) | ||
1716 | { | ||
1717 | struct mgmt_cp_user_passkey_reply *cp = (void *) data; | ||
1718 | |||
1719 | BT_DBG(""); | ||
1720 | |||
1721 | if (len != sizeof(*cp)) | ||
1722 | return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY, | ||
1723 | EINVAL); | ||
1724 | |||
1725 | return user_pairing_resp(sk, index, &cp->bdaddr, | ||
1726 | MGMT_OP_USER_PASSKEY_REPLY, | ||
1727 | HCI_OP_USER_PASSKEY_REPLY, cp->passkey); | ||
1728 | } | ||
1729 | |||
1730 | static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data, | ||
1731 | u16 len) | ||
1732 | { | ||
1733 | struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data; | ||
1734 | |||
1735 | BT_DBG(""); | ||
1736 | |||
1737 | if (len != sizeof(*cp)) | ||
1738 | return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY, | ||
1739 | EINVAL); | ||
1740 | |||
1741 | return user_pairing_resp(sk, index, &cp->bdaddr, | ||
1742 | MGMT_OP_USER_PASSKEY_NEG_REPLY, | ||
1743 | HCI_OP_USER_PASSKEY_NEG_REPLY, 0); | ||
1744 | } | ||
1745 | |||
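
A compressed sketch of the dispatch shared by the four wrapper functions above: look for an existing ACL connection first and answer with the matching HCI command; otherwise fall back to an LE connection and hand the reply to the SMP engine; only the passkey variants carry a 32-bit value. The callback struct and link enum below are hypothetical stand-ins for hci_send_cmd() and smp_user_confirm_reply(), reduced to plain C so the flow is visible on its own.

    #include <stdint.h>

    enum sketch_link { SKETCH_LINK_NONE, SKETCH_LINK_ACL, SKETCH_LINK_LE };

    struct sketch_pairing_ops {
            int (*hci_reply)(uint16_t hci_op, const uint8_t bdaddr[6],
                             uint32_t passkey);
            int (*smp_reply)(uint16_t mgmt_op, uint32_t passkey);
    };

    static int sketch_user_pairing_resp(enum sketch_link link,
                                        const struct sketch_pairing_ops *ops,
                                        uint16_t mgmt_op, uint16_t hci_op,
                                        const uint8_t bdaddr[6], uint32_t passkey)
    {
            switch (link) {
            case SKETCH_LINK_ACL:
                    /* BR/EDR pairing: reply via the corresponding HCI command. */
                    return ops->hci_reply(hci_op, bdaddr, passkey);
            case SKETCH_LINK_LE:
                    /* LE pairing: the reply goes through SMP instead. */
                    return ops->smp_reply(mgmt_op, passkey);
            default:
                    return -1;      /* neither link present: not connected */
            }
    }
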
1452 | static int set_local_name(struct sock *sk, u16 index, unsigned char *data, | 1746 | static int set_local_name(struct sock *sk, u16 index, unsigned char *data, |
1453 | u16 len) | 1747 | u16 len) |
1454 | { | 1748 | { |
@@ -1461,15 +1755,17 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, | |||
1461 | BT_DBG(""); | 1755 | BT_DBG(""); |
1462 | 1756 | ||
1463 | if (len != sizeof(*mgmt_cp)) | 1757 | if (len != sizeof(*mgmt_cp)) |
1464 | return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL); | 1758 | return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, |
1759 | MGMT_STATUS_INVALID_PARAMS); | ||
1465 | 1760 | ||
1466 | hdev = hci_dev_get(index); | 1761 | hdev = hci_dev_get(index); |
1467 | if (!hdev) | 1762 | if (!hdev) |
1468 | return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); | 1763 | return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, |
1764 | MGMT_STATUS_INVALID_PARAMS); | ||
1469 | 1765 | ||
1470 | hci_dev_lock_bh(hdev); | 1766 | hci_dev_lock(hdev); |
1471 | 1767 | ||
1472 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); | 1768 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); |
1473 | if (!cmd) { | 1769 | if (!cmd) { |
1474 | err = -ENOMEM; | 1770 | err = -ENOMEM; |
1475 | goto failed; | 1771 | goto failed; |
@@ -1482,7 +1778,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, | |||
1482 | mgmt_pending_remove(cmd); | 1778 | mgmt_pending_remove(cmd); |
1483 | 1779 | ||
1484 | failed: | 1780 | failed: |
1485 | hci_dev_unlock_bh(hdev); | 1781 | hci_dev_unlock(hdev); |
1486 | hci_dev_put(hdev); | 1782 | hci_dev_put(hdev); |
1487 | 1783 | ||
1488 | return err; | 1784 | return err; |
@@ -1499,28 +1795,29 @@ static int read_local_oob_data(struct sock *sk, u16 index) | |||
1499 | hdev = hci_dev_get(index); | 1795 | hdev = hci_dev_get(index); |
1500 | if (!hdev) | 1796 | if (!hdev) |
1501 | return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, | 1797 | return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, |
1502 | ENODEV); | 1798 | MGMT_STATUS_INVALID_PARAMS); |
1503 | 1799 | ||
1504 | hci_dev_lock_bh(hdev); | 1800 | hci_dev_lock(hdev); |
1505 | 1801 | ||
1506 | if (!test_bit(HCI_UP, &hdev->flags)) { | 1802 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1507 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, | 1803 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, |
1508 | ENETDOWN); | 1804 | MGMT_STATUS_NOT_POWERED); |
1509 | goto unlock; | 1805 | goto unlock; |
1510 | } | 1806 | } |
1511 | 1807 | ||
1512 | if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { | 1808 | if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { |
1513 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, | 1809 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, |
1514 | EOPNOTSUPP); | 1810 | MGMT_STATUS_NOT_SUPPORTED); |
1515 | goto unlock; | 1811 | goto unlock; |
1516 | } | 1812 | } |
1517 | 1813 | ||
1518 | if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) { | 1814 | if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { |
1519 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY); | 1815 | err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, |
1816 | MGMT_STATUS_BUSY); | ||
1520 | goto unlock; | 1817 | goto unlock; |
1521 | } | 1818 | } |
1522 | 1819 | ||
1523 | cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0); | 1820 | cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); |
1524 | if (!cmd) { | 1821 | if (!cmd) { |
1525 | err = -ENOMEM; | 1822 | err = -ENOMEM; |
1526 | goto unlock; | 1823 | goto unlock; |
@@ -1531,7 +1828,7 @@ static int read_local_oob_data(struct sock *sk, u16 index) | |||
1531 | mgmt_pending_remove(cmd); | 1828 | mgmt_pending_remove(cmd); |
1532 | 1829 | ||
1533 | unlock: | 1830 | unlock: |
1534 | hci_dev_unlock_bh(hdev); | 1831 | hci_dev_unlock(hdev); |
1535 | hci_dev_put(hdev); | 1832 | hci_dev_put(hdev); |
1536 | 1833 | ||
1537 | return err; | 1834 | return err; |
@@ -1548,24 +1845,25 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, | |||
1548 | 1845 | ||
1549 | if (len != sizeof(*cp)) | 1846 | if (len != sizeof(*cp)) |
1550 | return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, | 1847 | return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, |
1551 | EINVAL); | 1848 | MGMT_STATUS_INVALID_PARAMS); |
1552 | 1849 | ||
1553 | hdev = hci_dev_get(index); | 1850 | hdev = hci_dev_get(index); |
1554 | if (!hdev) | 1851 | if (!hdev) |
1555 | return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, | 1852 | return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, |
1556 | ENODEV); | 1853 | MGMT_STATUS_INVALID_PARAMS); |
1557 | 1854 | ||
1558 | hci_dev_lock_bh(hdev); | 1855 | hci_dev_lock(hdev); |
1559 | 1856 | ||
1560 | err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, | 1857 | err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, |
1561 | cp->randomizer); | 1858 | cp->randomizer); |
1562 | if (err < 0) | 1859 | if (err < 0) |
1563 | err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err); | 1860 | err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, |
1861 | MGMT_STATUS_FAILED); | ||
1564 | else | 1862 | else |
1565 | err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, | 1863 | err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, |
1566 | 0); | 1864 | 0); |
1567 | 1865 | ||
1568 | hci_dev_unlock_bh(hdev); | 1866 | hci_dev_unlock(hdev); |
1569 | hci_dev_put(hdev); | 1867 | hci_dev_put(hdev); |
1570 | 1868 | ||
1571 | return err; | 1869 | return err; |
@@ -1582,62 +1880,68 @@ static int remove_remote_oob_data(struct sock *sk, u16 index, | |||
1582 | 1880 | ||
1583 | if (len != sizeof(*cp)) | 1881 | if (len != sizeof(*cp)) |
1584 | return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, | 1882 | return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, |
1585 | EINVAL); | 1883 | MGMT_STATUS_INVALID_PARAMS); |
1586 | 1884 | ||
1587 | hdev = hci_dev_get(index); | 1885 | hdev = hci_dev_get(index); |
1588 | if (!hdev) | 1886 | if (!hdev) |
1589 | return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, | 1887 | return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, |
1590 | ENODEV); | 1888 | MGMT_STATUS_INVALID_PARAMS); |
1591 | 1889 | ||
1592 | hci_dev_lock_bh(hdev); | 1890 | hci_dev_lock(hdev); |
1593 | 1891 | ||
1594 | err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); | 1892 | err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); |
1595 | if (err < 0) | 1893 | if (err < 0) |
1596 | err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, | 1894 | err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, |
1597 | -err); | 1895 | MGMT_STATUS_INVALID_PARAMS); |
1598 | else | 1896 | else |
1599 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, | 1897 | err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, |
1600 | NULL, 0); | 1898 | NULL, 0); |
1601 | 1899 | ||
1602 | hci_dev_unlock_bh(hdev); | 1900 | hci_dev_unlock(hdev); |
1603 | hci_dev_put(hdev); | 1901 | hci_dev_put(hdev); |
1604 | 1902 | ||
1605 | return err; | 1903 | return err; |
1606 | } | 1904 | } |
1607 | 1905 | ||
1608 | static int start_discovery(struct sock *sk, u16 index) | 1906 | static int start_discovery(struct sock *sk, u16 index, |
1907 | unsigned char *data, u16 len) | ||
1609 | { | 1908 | { |
1610 | u8 lap[3] = { 0x33, 0x8b, 0x9e }; | 1909 | struct mgmt_cp_start_discovery *cp = (void *) data; |
1611 | struct hci_cp_inquiry cp; | ||
1612 | struct pending_cmd *cmd; | 1910 | struct pending_cmd *cmd; |
1613 | struct hci_dev *hdev; | 1911 | struct hci_dev *hdev; |
1614 | int err; | 1912 | int err; |
1615 | 1913 | ||
1616 | BT_DBG("hci%u", index); | 1914 | BT_DBG("hci%u", index); |
1617 | 1915 | ||
1916 | if (len != sizeof(*cp)) | ||
1917 | return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, | ||
1918 | MGMT_STATUS_INVALID_PARAMS); | ||
1919 | |||
1618 | hdev = hci_dev_get(index); | 1920 | hdev = hci_dev_get(index); |
1619 | if (!hdev) | 1921 | if (!hdev) |
1620 | return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV); | 1922 | return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, |
1923 | MGMT_STATUS_INVALID_PARAMS); | ||
1621 | 1924 | ||
1622 | hci_dev_lock_bh(hdev); | 1925 | hci_dev_lock(hdev); |
1623 | 1926 | ||
1624 | cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0); | 1927 | if (!test_bit(HCI_UP, &hdev->flags)) { |
1928 | err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, | ||
1929 | MGMT_STATUS_NOT_POWERED); | ||
1930 | goto failed; | ||
1931 | } | ||
1932 | |||
1933 | cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0); | ||
1625 | if (!cmd) { | 1934 | if (!cmd) { |
1626 | err = -ENOMEM; | 1935 | err = -ENOMEM; |
1627 | goto failed; | 1936 | goto failed; |
1628 | } | 1937 | } |
1629 | 1938 | ||
1630 | memset(&cp, 0, sizeof(cp)); | 1939 | err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); |
1631 | memcpy(&cp.lap, lap, 3); | ||
1632 | cp.length = 0x08; | ||
1633 | cp.num_rsp = 0x00; | ||
1634 | |||
1635 | err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); | ||
1636 | if (err < 0) | 1940 | if (err < 0) |
1637 | mgmt_pending_remove(cmd); | 1941 | mgmt_pending_remove(cmd); |
1638 | 1942 | ||
1639 | failed: | 1943 | failed: |
1640 | hci_dev_unlock_bh(hdev); | 1944 | hci_dev_unlock(hdev); |
1641 | hci_dev_put(hdev); | 1945 | hci_dev_put(hdev); |
1642 | 1946 | ||
1643 | return err; | 1947 | return err; |
@@ -1653,22 +1957,23 @@ static int stop_discovery(struct sock *sk, u16 index) | |||
1653 | 1957 | ||
1654 | hdev = hci_dev_get(index); | 1958 | hdev = hci_dev_get(index); |
1655 | if (!hdev) | 1959 | if (!hdev) |
1656 | return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV); | 1960 | return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, |
1961 | MGMT_STATUS_INVALID_PARAMS); | ||
1657 | 1962 | ||
1658 | hci_dev_lock_bh(hdev); | 1963 | hci_dev_lock(hdev); |
1659 | 1964 | ||
1660 | cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0); | 1965 | cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); |
1661 | if (!cmd) { | 1966 | if (!cmd) { |
1662 | err = -ENOMEM; | 1967 | err = -ENOMEM; |
1663 | goto failed; | 1968 | goto failed; |
1664 | } | 1969 | } |
1665 | 1970 | ||
1666 | err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); | 1971 | err = hci_cancel_inquiry(hdev); |
1667 | if (err < 0) | 1972 | if (err < 0) |
1668 | mgmt_pending_remove(cmd); | 1973 | mgmt_pending_remove(cmd); |
1669 | 1974 | ||
1670 | failed: | 1975 | failed: |
1671 | hci_dev_unlock_bh(hdev); | 1976 | hci_dev_unlock(hdev); |
1672 | hci_dev_put(hdev); | 1977 | hci_dev_put(hdev); |
1673 | 1978 | ||
1674 | return err; | 1979 | return err; |
@@ -1678,7 +1983,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, | |||
1678 | u16 len) | 1983 | u16 len) |
1679 | { | 1984 | { |
1680 | struct hci_dev *hdev; | 1985 | struct hci_dev *hdev; |
1681 | struct pending_cmd *cmd; | ||
1682 | struct mgmt_cp_block_device *cp = (void *) data; | 1986 | struct mgmt_cp_block_device *cp = (void *) data; |
1683 | int err; | 1987 | int err; |
1684 | 1988 | ||
@@ -1686,33 +1990,24 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, | |||
1686 | 1990 | ||
1687 | if (len != sizeof(*cp)) | 1991 | if (len != sizeof(*cp)) |
1688 | return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, | 1992 | return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, |
1689 | EINVAL); | 1993 | MGMT_STATUS_INVALID_PARAMS); |
1690 | 1994 | ||
1691 | hdev = hci_dev_get(index); | 1995 | hdev = hci_dev_get(index); |
1692 | if (!hdev) | 1996 | if (!hdev) |
1693 | return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, | 1997 | return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, |
1694 | ENODEV); | 1998 | MGMT_STATUS_INVALID_PARAMS); |
1695 | 1999 | ||
1696 | hci_dev_lock_bh(hdev); | 2000 | hci_dev_lock(hdev); |
1697 | |||
1698 | cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0); | ||
1699 | if (!cmd) { | ||
1700 | err = -ENOMEM; | ||
1701 | goto failed; | ||
1702 | } | ||
1703 | 2001 | ||
1704 | err = hci_blacklist_add(hdev, &cp->bdaddr); | 2002 | err = hci_blacklist_add(hdev, &cp->bdaddr); |
1705 | |||
1706 | if (err < 0) | 2003 | if (err < 0) |
1707 | err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err); | 2004 | err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, |
2005 | MGMT_STATUS_FAILED); | ||
1708 | else | 2006 | else |
1709 | err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, | 2007 | err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, |
1710 | NULL, 0); | 2008 | NULL, 0); |
1711 | 2009 | ||
1712 | mgmt_pending_remove(cmd); | 2010 | hci_dev_unlock(hdev); |
1713 | |||
1714 | failed: | ||
1715 | hci_dev_unlock_bh(hdev); | ||
1716 | hci_dev_put(hdev); | 2011 | hci_dev_put(hdev); |
1717 | 2012 | ||
1718 | return err; | 2013 | return err; |
@@ -1722,7 +2017,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, | |||
1722 | u16 len) | 2017 | u16 len) |
1723 | { | 2018 | { |
1724 | struct hci_dev *hdev; | 2019 | struct hci_dev *hdev; |
1725 | struct pending_cmd *cmd; | ||
1726 | struct mgmt_cp_unblock_device *cp = (void *) data; | 2020 | struct mgmt_cp_unblock_device *cp = (void *) data; |
1727 | int err; | 2021 | int err; |
1728 | 2022 | ||
@@ -1730,33 +2024,25 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, | |||
1730 | 2024 | ||
1731 | if (len != sizeof(*cp)) | 2025 | if (len != sizeof(*cp)) |
1732 | return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, | 2026 | return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, |
1733 | EINVAL); | 2027 | MGMT_STATUS_INVALID_PARAMS); |
1734 | 2028 | ||
1735 | hdev = hci_dev_get(index); | 2029 | hdev = hci_dev_get(index); |
1736 | if (!hdev) | 2030 | if (!hdev) |
1737 | return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, | 2031 | return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, |
1738 | ENODEV); | 2032 | MGMT_STATUS_INVALID_PARAMS); |
1739 | 2033 | ||
1740 | hci_dev_lock_bh(hdev); | 2034 | hci_dev_lock(hdev); |
1741 | |||
1742 | cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0); | ||
1743 | if (!cmd) { | ||
1744 | err = -ENOMEM; | ||
1745 | goto failed; | ||
1746 | } | ||
1747 | 2035 | ||
1748 | err = hci_blacklist_del(hdev, &cp->bdaddr); | 2036 | err = hci_blacklist_del(hdev, &cp->bdaddr); |
1749 | 2037 | ||
1750 | if (err < 0) | 2038 | if (err < 0) |
1751 | err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err); | 2039 | err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, |
2040 | MGMT_STATUS_INVALID_PARAMS); | ||
1752 | else | 2041 | else |
1753 | err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, | 2042 | err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, |
1754 | NULL, 0); | 2043 | NULL, 0); |
1755 | 2044 | ||
1756 | mgmt_pending_remove(cmd); | 2045 | hci_dev_unlock(hdev); |
1757 | |||
1758 | failed: | ||
1759 | hci_dev_unlock_bh(hdev); | ||
1760 | hci_dev_put(hdev); | 2046 | hci_dev_put(hdev); |
1761 | 2047 | ||
1762 | return err; | 2048 | return err; |
@@ -1766,7 +2052,7 @@ static int set_fast_connectable(struct sock *sk, u16 index, | |||
1766 | unsigned char *data, u16 len) | 2052 | unsigned char *data, u16 len) |
1767 | { | 2053 | { |
1768 | struct hci_dev *hdev; | 2054 | struct hci_dev *hdev; |
1769 | struct mgmt_cp_set_fast_connectable *cp = (void *) data; | 2055 | struct mgmt_mode *cp = (void *) data; |
1770 | struct hci_cp_write_page_scan_activity acp; | 2056 | struct hci_cp_write_page_scan_activity acp; |
1771 | u8 type; | 2057 | u8 type; |
1772 | int err; | 2058 | int err; |
@@ -1775,16 +2061,16 @@ static int set_fast_connectable(struct sock *sk, u16 index, | |||
1775 | 2061 | ||
1776 | if (len != sizeof(*cp)) | 2062 | if (len != sizeof(*cp)) |
1777 | return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, | 2063 | return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, |
1778 | EINVAL); | 2064 | MGMT_STATUS_INVALID_PARAMS); |
1779 | 2065 | ||
1780 | hdev = hci_dev_get(index); | 2066 | hdev = hci_dev_get(index); |
1781 | if (!hdev) | 2067 | if (!hdev) |
1782 | return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, | 2068 | return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, |
1783 | ENODEV); | 2069 | MGMT_STATUS_INVALID_PARAMS); |
1784 | 2070 | ||
1785 | hci_dev_lock(hdev); | 2071 | hci_dev_lock(hdev); |
1786 | 2072 | ||
1787 | if (cp->enable) { | 2073 | if (cp->val) { |
1788 | type = PAGE_SCAN_TYPE_INTERLACED; | 2074 | type = PAGE_SCAN_TYPE_INTERLACED; |
1789 | acp.interval = 0x0024; /* 22.5 msec page scan interval */ | 2075 | acp.interval = 0x0024; /* 22.5 msec page scan interval */ |
1790 | } else { | 2076 | } else { |
@@ -1798,14 +2084,14 @@ static int set_fast_connectable(struct sock *sk, u16 index, | |||
1798 | sizeof(acp), &acp); | 2084 | sizeof(acp), &acp); |
1799 | if (err < 0) { | 2085 | if (err < 0) { |
1800 | err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, | 2086 | err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, |
1801 | -err); | 2087 | MGMT_STATUS_FAILED); |
1802 | goto done; | 2088 | goto done; |
1803 | } | 2089 | } |
1804 | 2090 | ||
1805 | err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); | 2091 | err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); |
1806 | if (err < 0) { | 2092 | if (err < 0) { |
1807 | err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, | 2093 | err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, |
1808 | -err); | 2094 | MGMT_STATUS_FAILED); |
1809 | goto done; | 2095 | goto done; |
1810 | } | 2096 | } |
1811 | 2097 | ||
@@ -1868,6 +2154,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
1868 | case MGMT_OP_SET_CONNECTABLE: | 2154 | case MGMT_OP_SET_CONNECTABLE: |
1869 | err = set_connectable(sk, index, buf + sizeof(*hdr), len); | 2155 | err = set_connectable(sk, index, buf + sizeof(*hdr), len); |
1870 | break; | 2156 | break; |
2157 | case MGMT_OP_SET_FAST_CONNECTABLE: | ||
2158 | err = set_fast_connectable(sk, index, buf + sizeof(*hdr), | ||
2159 | len); | ||
2160 | break; | ||
1871 | case MGMT_OP_SET_PAIRABLE: | 2161 | case MGMT_OP_SET_PAIRABLE: |
1872 | err = set_pairable(sk, index, buf + sizeof(*hdr), len); | 2162 | err = set_pairable(sk, index, buf + sizeof(*hdr), len); |
1873 | break; | 2163 | break; |
@@ -1880,14 +2170,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
1880 | case MGMT_OP_SET_DEV_CLASS: | 2170 | case MGMT_OP_SET_DEV_CLASS: |
1881 | err = set_dev_class(sk, index, buf + sizeof(*hdr), len); | 2171 | err = set_dev_class(sk, index, buf + sizeof(*hdr), len); |
1882 | break; | 2172 | break; |
1883 | case MGMT_OP_SET_SERVICE_CACHE: | 2173 | case MGMT_OP_LOAD_LINK_KEYS: |
1884 | err = set_service_cache(sk, index, buf + sizeof(*hdr), len); | 2174 | err = load_link_keys(sk, index, buf + sizeof(*hdr), len); |
1885 | break; | ||
1886 | case MGMT_OP_LOAD_KEYS: | ||
1887 | err = load_keys(sk, index, buf + sizeof(*hdr), len); | ||
1888 | break; | 2175 | break; |
1889 | case MGMT_OP_REMOVE_KEY: | 2176 | case MGMT_OP_REMOVE_KEYS: |
1890 | err = remove_key(sk, index, buf + sizeof(*hdr), len); | 2177 | err = remove_keys(sk, index, buf + sizeof(*hdr), len); |
1891 | break; | 2178 | break; |
1892 | case MGMT_OP_DISCONNECT: | 2179 | case MGMT_OP_DISCONNECT: |
1893 | err = disconnect(sk, index, buf + sizeof(*hdr), len); | 2180 | err = disconnect(sk, index, buf + sizeof(*hdr), len); |
@@ -1908,10 +2195,18 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
1908 | err = pair_device(sk, index, buf + sizeof(*hdr), len); | 2195 | err = pair_device(sk, index, buf + sizeof(*hdr), len); |
1909 | break; | 2196 | break; |
1910 | case MGMT_OP_USER_CONFIRM_REPLY: | 2197 | case MGMT_OP_USER_CONFIRM_REPLY: |
1911 | err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1); | 2198 | err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len); |
1912 | break; | 2199 | break; |
1913 | case MGMT_OP_USER_CONFIRM_NEG_REPLY: | 2200 | case MGMT_OP_USER_CONFIRM_NEG_REPLY: |
1914 | err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0); | 2201 | err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr), |
2202 | len); | ||
2203 | break; | ||
2204 | case MGMT_OP_USER_PASSKEY_REPLY: | ||
2205 | err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len); | ||
2206 | break; | ||
2207 | case MGMT_OP_USER_PASSKEY_NEG_REPLY: | ||
2208 | err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr), | ||
2209 | len); | ||
1915 | break; | 2210 | break; |
1916 | case MGMT_OP_SET_LOCAL_NAME: | 2211 | case MGMT_OP_SET_LOCAL_NAME: |
1917 | err = set_local_name(sk, index, buf + sizeof(*hdr), len); | 2212 | err = set_local_name(sk, index, buf + sizeof(*hdr), len); |
@@ -1927,7 +2222,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
1927 | len); | 2222 | len); |
1928 | break; | 2223 | break; |
1929 | case MGMT_OP_START_DISCOVERY: | 2224 | case MGMT_OP_START_DISCOVERY: |
1930 | err = start_discovery(sk, index); | 2225 | err = start_discovery(sk, index, buf + sizeof(*hdr), len); |
1931 | break; | 2226 | break; |
1932 | case MGMT_OP_STOP_DISCOVERY: | 2227 | case MGMT_OP_STOP_DISCOVERY: |
1933 | err = stop_discovery(sk, index); | 2228 | err = stop_discovery(sk, index); |
@@ -1938,13 +2233,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) | |||
1938 | case MGMT_OP_UNBLOCK_DEVICE: | 2233 | case MGMT_OP_UNBLOCK_DEVICE: |
1939 | err = unblock_device(sk, index, buf + sizeof(*hdr), len); | 2234 | err = unblock_device(sk, index, buf + sizeof(*hdr), len); |
1940 | break; | 2235 | break; |
1941 | case MGMT_OP_SET_FAST_CONNECTABLE: | ||
1942 | err = set_fast_connectable(sk, index, buf + sizeof(*hdr), | ||
1943 | len); | ||
1944 | break; | ||
1945 | default: | 2236 | default: |
1946 | BT_DBG("Unknown op %u", opcode); | 2237 | BT_DBG("Unknown op %u", opcode); |
1947 | err = cmd_status(sk, index, opcode, 0x01); | 2238 | err = cmd_status(sk, index, opcode, |
2239 | MGMT_STATUS_UNKNOWN_COMMAND); | ||
1948 | break; | 2240 | break; |
1949 | } | 2241 | } |
1950 | 2242 | ||
@@ -1958,30 +2250,39 @@ done: | |||
1958 | return err; | 2250 | return err; |
1959 | } | 2251 | } |
1960 | 2252 | ||
1961 | int mgmt_index_added(u16 index) | 2253 | static void cmd_status_rsp(struct pending_cmd *cmd, void *data) |
1962 | { | 2254 | { |
1963 | return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); | 2255 | u8 *status = data; |
2256 | |||
2257 | cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); | ||
2258 | mgmt_pending_remove(cmd); | ||
1964 | } | 2259 | } |
1965 | 2260 | ||
1966 | int mgmt_index_removed(u16 index) | 2261 | int mgmt_index_added(struct hci_dev *hdev) |
1967 | { | 2262 | { |
1968 | return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); | 2263 | return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); |
2264 | } | ||
2265 | |||
2266 | int mgmt_index_removed(struct hci_dev *hdev) | ||
2267 | { | ||
2268 | u8 status = ENODEV; | ||
2269 | |||
2270 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); | ||
2271 | |||
2272 | return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); | ||
1969 | } | 2273 | } |
1970 | 2274 | ||
1971 | struct cmd_lookup { | 2275 | struct cmd_lookup { |
1972 | u8 val; | 2276 | u8 val; |
1973 | struct sock *sk; | 2277 | struct sock *sk; |
2278 | struct hci_dev *hdev; | ||
1974 | }; | 2279 | }; |
1975 | 2280 | ||
1976 | static void mode_rsp(struct pending_cmd *cmd, void *data) | 2281 | static void settings_rsp(struct pending_cmd *cmd, void *data) |
1977 | { | 2282 | { |
1978 | struct mgmt_mode *cp = cmd->param; | ||
1979 | struct cmd_lookup *match = data; | 2283 | struct cmd_lookup *match = data; |
1980 | 2284 | ||
1981 | if (cp->val != match->val) | 2285 | send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); |
1982 | return; | ||
1983 | |||
1984 | send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val); | ||
1985 | 2286 | ||
1986 | list_del(&cmd->list); | 2287 | list_del(&cmd->list); |
1987 | 2288 | ||
@@ -1993,17 +2294,23 @@ static void mode_rsp(struct pending_cmd *cmd, void *data) | |||
1993 | mgmt_pending_free(cmd); | 2294 | mgmt_pending_free(cmd); |
1994 | } | 2295 | } |
1995 | 2296 | ||
1996 | int mgmt_powered(u16 index, u8 powered) | 2297 | int mgmt_powered(struct hci_dev *hdev, u8 powered) |
1997 | { | 2298 | { |
1998 | struct mgmt_mode ev; | 2299 | struct cmd_lookup match = { powered, NULL, hdev }; |
1999 | struct cmd_lookup match = { powered, NULL }; | 2300 | __le32 ev; |
2000 | int ret; | 2301 | int ret; |
2001 | 2302 | ||
2002 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); | 2303 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); |
2003 | 2304 | ||
2004 | ev.val = powered; | 2305 | if (!powered) { |
2306 | u8 status = ENETDOWN; | ||
2307 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); | ||
2308 | } | ||
2309 | |||
2310 | ev = cpu_to_le32(get_current_settings(hdev)); | ||
2005 | 2311 | ||
2006 | ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); | 2312 | ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), |
2313 | match.sk); | ||
2007 | 2314 | ||
2008 | if (match.sk) | 2315 | if (match.sk) |
2009 | sock_put(match.sk); | 2316 | sock_put(match.sk); |
@@ -2011,36 +2318,36 @@ int mgmt_powered(u16 index, u8 powered) | |||
2011 | return ret; | 2318 | return ret; |
2012 | } | 2319 | } |
2013 | 2320 | ||
2014 | int mgmt_discoverable(u16 index, u8 discoverable) | 2321 | int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) |
2015 | { | 2322 | { |
2016 | struct mgmt_mode ev; | 2323 | struct cmd_lookup match = { discoverable, NULL, hdev }; |
2017 | struct cmd_lookup match = { discoverable, NULL }; | 2324 | __le32 ev; |
2018 | int ret; | 2325 | int ret; |
2019 | 2326 | ||
2020 | mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); | 2327 | mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match); |
2021 | 2328 | ||
2022 | ev.val = discoverable; | 2329 | ev = cpu_to_le32(get_current_settings(hdev)); |
2023 | 2330 | ||
2024 | ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), | 2331 | ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), |
2025 | match.sk); | 2332 | match.sk); |
2026 | |||
2027 | if (match.sk) | 2333 | if (match.sk) |
2028 | sock_put(match.sk); | 2334 | sock_put(match.sk); |
2029 | 2335 | ||
2030 | return ret; | 2336 | return ret; |
2031 | } | 2337 | } |
2032 | 2338 | ||
2033 | int mgmt_connectable(u16 index, u8 connectable) | 2339 | int mgmt_connectable(struct hci_dev *hdev, u8 connectable) |
2034 | { | 2340 | { |
2035 | struct mgmt_mode ev; | 2341 | __le32 ev; |
2036 | struct cmd_lookup match = { connectable, NULL }; | 2342 | struct cmd_lookup match = { connectable, NULL, hdev }; |
2037 | int ret; | 2343 | int ret; |
2038 | 2344 | ||
2039 | mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); | 2345 | mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, |
2346 | &match); | ||
2040 | 2347 | ||
2041 | ev.val = connectable; | 2348 | ev = cpu_to_le32(get_current_settings(hdev)); |
2042 | 2349 | ||
2043 | ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); | 2350 | ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk); |
2044 | 2351 | ||
2045 | if (match.sk) | 2352 | if (match.sk) |
2046 | sock_put(match.sk); | 2353 | sock_put(match.sk); |
@@ -2048,9 +2355,25 @@ int mgmt_connectable(u16 index, u8 connectable) | |||
2048 | return ret; | 2355 | return ret; |
2049 | } | 2356 | } |
2050 | 2357 | ||
2051 | int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) | 2358 | int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) |
2359 | { | ||
2360 | u8 mgmt_err = mgmt_status(status); | ||
2361 | |||
2362 | if (scan & SCAN_PAGE) | ||
2363 | mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, | ||
2364 | cmd_status_rsp, &mgmt_err); | ||
2365 | |||
2366 | if (scan & SCAN_INQUIRY) | ||
2367 | mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, | ||
2368 | cmd_status_rsp, &mgmt_err); | ||
2369 | |||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
2373 | int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, | ||
2374 | u8 persistent) | ||
2052 | { | 2375 | { |
2053 | struct mgmt_ev_new_key ev; | 2376 | struct mgmt_ev_new_link_key ev; |
2054 | 2377 | ||
2055 | memset(&ev, 0, sizeof(ev)); | 2378 | memset(&ev, 0, sizeof(ev)); |
2056 | 2379 | ||
@@ -2060,17 +2383,18 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) | |||
2060 | memcpy(ev.key.val, key->val, 16); | 2383 | memcpy(ev.key.val, key->val, 16); |
2061 | ev.key.pin_len = key->pin_len; | 2384 | ev.key.pin_len = key->pin_len; |
2062 | 2385 | ||
2063 | return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); | 2386 | return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); |
2064 | } | 2387 | } |
2065 | 2388 | ||
2066 | int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type) | 2389 | int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
2390 | u8 addr_type) | ||
2067 | { | 2391 | { |
2068 | struct mgmt_ev_connected ev; | 2392 | struct mgmt_addr_info ev; |
2069 | 2393 | ||
2070 | bacpy(&ev.bdaddr, bdaddr); | 2394 | bacpy(&ev.bdaddr, bdaddr); |
2071 | ev.link_type = link_type; | 2395 | ev.type = link_to_mgmt(link_type, addr_type); |
2072 | 2396 | ||
2073 | return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); | 2397 | return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL); |
2074 | } | 2398 | } |
2075 | 2399 | ||
2076 | static void disconnect_rsp(struct pending_cmd *cmd, void *data) | 2400 | static void disconnect_rsp(struct pending_cmd *cmd, void *data) |
@@ -2080,6 +2404,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) | |||
2080 | struct mgmt_rp_disconnect rp; | 2404 | struct mgmt_rp_disconnect rp; |
2081 | 2405 | ||
2082 | bacpy(&rp.bdaddr, &cp->bdaddr); | 2406 | bacpy(&rp.bdaddr, &cp->bdaddr); |
2407 | rp.status = 0; | ||
2083 | 2408 | ||
2084 | cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); | 2409 | cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); |
2085 | 2410 | ||
@@ -2089,75 +2414,110 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) | |||
2089 | mgmt_pending_remove(cmd); | 2414 | mgmt_pending_remove(cmd); |
2090 | } | 2415 | } |
2091 | 2416 | ||
2092 | int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) | 2417 | static void remove_keys_rsp(struct pending_cmd *cmd, void *data) |
2418 | { | ||
2419 | u8 *status = data; | ||
2420 | struct mgmt_cp_remove_keys *cp = cmd->param; | ||
2421 | struct mgmt_rp_remove_keys rp; | ||
2422 | |||
2423 | memset(&rp, 0, sizeof(rp)); | ||
2424 | bacpy(&rp.bdaddr, &cp->bdaddr); | ||
2425 | if (status != NULL) | ||
2426 | rp.status = *status; | ||
2427 | |||
2428 | cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp, | ||
2429 | sizeof(rp)); | ||
2430 | |||
2431 | mgmt_pending_remove(cmd); | ||
2432 | } | ||
2433 | |||
2434 | int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, | ||
2435 | u8 addr_type) | ||
2093 | { | 2436 | { |
2094 | struct mgmt_ev_disconnected ev; | 2437 | struct mgmt_addr_info ev; |
2095 | struct sock *sk = NULL; | 2438 | struct sock *sk = NULL; |
2096 | int err; | 2439 | int err; |
2097 | 2440 | ||
2098 | mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); | 2441 | mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); |
2099 | 2442 | ||
2100 | bacpy(&ev.bdaddr, bdaddr); | 2443 | bacpy(&ev.bdaddr, bdaddr); |
2444 | ev.type = link_to_mgmt(link_type, addr_type); | ||
2101 | 2445 | ||
2102 | err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); | 2446 | err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk); |
2103 | 2447 | ||
2104 | if (sk) | 2448 | if (sk) |
2105 | sock_put(sk); | 2449 | sock_put(sk); |
2106 | 2450 | ||
2451 | mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL); | ||
2452 | |||
2107 | return err; | 2453 | return err; |
2108 | } | 2454 | } |
2109 | 2455 | ||
2110 | int mgmt_disconnect_failed(u16 index) | 2456 | int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) |
2111 | { | 2457 | { |
2112 | struct pending_cmd *cmd; | 2458 | struct pending_cmd *cmd; |
2459 | u8 mgmt_err = mgmt_status(status); | ||
2113 | int err; | 2460 | int err; |
2114 | 2461 | ||
2115 | cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index); | 2462 | cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); |
2116 | if (!cmd) | 2463 | if (!cmd) |
2117 | return -ENOENT; | 2464 | return -ENOENT; |
2118 | 2465 | ||
2119 | err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); | 2466 | if (bdaddr) { |
2467 | struct mgmt_rp_disconnect rp; | ||
2468 | |||
2469 | bacpy(&rp.bdaddr, bdaddr); | ||
2470 | rp.status = status; | ||
2471 | |||
2472 | err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, | ||
2473 | &rp, sizeof(rp)); | ||
2474 | } else | ||
2475 | err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, | ||
2476 | mgmt_err); | ||
2120 | 2477 | ||
2121 | mgmt_pending_remove(cmd); | 2478 | mgmt_pending_remove(cmd); |
2122 | 2479 | ||
2123 | return err; | 2480 | return err; |
2124 | } | 2481 | } |
2125 | 2482 | ||
2126 | int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status) | 2483 | int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
2484 | u8 addr_type, u8 status) | ||
2127 | { | 2485 | { |
2128 | struct mgmt_ev_connect_failed ev; | 2486 | struct mgmt_ev_connect_failed ev; |
2129 | 2487 | ||
2130 | bacpy(&ev.bdaddr, bdaddr); | 2488 | bacpy(&ev.addr.bdaddr, bdaddr); |
2131 | ev.status = status; | 2489 | ev.addr.type = link_to_mgmt(link_type, addr_type); |
2490 | ev.status = mgmt_status(status); | ||
2132 | 2491 | ||
2133 | return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); | 2492 | return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); |
2134 | } | 2493 | } |
2135 | 2494 | ||
2136 | int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure) | 2495 | int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) |
2137 | { | 2496 | { |
2138 | struct mgmt_ev_pin_code_request ev; | 2497 | struct mgmt_ev_pin_code_request ev; |
2139 | 2498 | ||
2140 | bacpy(&ev.bdaddr, bdaddr); | 2499 | bacpy(&ev.bdaddr, bdaddr); |
2141 | ev.secure = secure; | 2500 | ev.secure = secure; |
2142 | 2501 | ||
2143 | return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), | 2502 | return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), |
2144 | NULL); | 2503 | NULL); |
2145 | } | 2504 | } |
2146 | 2505 | ||
2147 | int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | 2506 | int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2507 | u8 status) | ||
2148 | { | 2508 | { |
2149 | struct pending_cmd *cmd; | 2509 | struct pending_cmd *cmd; |
2150 | struct mgmt_rp_pin_code_reply rp; | 2510 | struct mgmt_rp_pin_code_reply rp; |
2151 | int err; | 2511 | int err; |
2152 | 2512 | ||
2153 | cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); | 2513 | cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); |
2154 | if (!cmd) | 2514 | if (!cmd) |
2155 | return -ENOENT; | 2515 | return -ENOENT; |
2156 | 2516 | ||
2157 | bacpy(&rp.bdaddr, bdaddr); | 2517 | bacpy(&rp.bdaddr, bdaddr); |
2158 | rp.status = status; | 2518 | rp.status = mgmt_status(status); |
2159 | 2519 | ||
2160 | err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, | 2520 | err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp, |
2161 | sizeof(rp)); | 2521 | sizeof(rp)); |
2162 | 2522 | ||
2163 | mgmt_pending_remove(cmd); | 2523 | mgmt_pending_remove(cmd); |
@@ -2165,20 +2525,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | |||
2165 | return err; | 2525 | return err; |
2166 | } | 2526 | } |
2167 | 2527 | ||
2168 | int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | 2528 | int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2529 | u8 status) | ||
2169 | { | 2530 | { |
2170 | struct pending_cmd *cmd; | 2531 | struct pending_cmd *cmd; |
2171 | struct mgmt_rp_pin_code_reply rp; | 2532 | struct mgmt_rp_pin_code_reply rp; |
2172 | int err; | 2533 | int err; |
2173 | 2534 | ||
2174 | cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); | 2535 | cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); |
2175 | if (!cmd) | 2536 | if (!cmd) |
2176 | return -ENOENT; | 2537 | return -ENOENT; |
2177 | 2538 | ||
2178 | bacpy(&rp.bdaddr, bdaddr); | 2539 | bacpy(&rp.bdaddr, bdaddr); |
2179 | rp.status = status; | 2540 | rp.status = mgmt_status(status); |
2180 | 2541 | ||
2181 | err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, | 2542 | err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, |
2182 | sizeof(rp)); | 2543 | sizeof(rp)); |
2183 | 2544 | ||
2184 | mgmt_pending_remove(cmd); | 2545 | mgmt_pending_remove(cmd); |
@@ -2186,97 +2547,119 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | |||
2186 | return err; | 2547 | return err; |
2187 | } | 2548 | } |
2188 | 2549 | ||
2189 | int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, | 2550 | int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2190 | u8 confirm_hint) | 2551 | __le32 value, u8 confirm_hint) |
2191 | { | 2552 | { |
2192 | struct mgmt_ev_user_confirm_request ev; | 2553 | struct mgmt_ev_user_confirm_request ev; |
2193 | 2554 | ||
2194 | BT_DBG("hci%u", index); | 2555 | BT_DBG("%s", hdev->name); |
2195 | 2556 | ||
2196 | bacpy(&ev.bdaddr, bdaddr); | 2557 | bacpy(&ev.bdaddr, bdaddr); |
2197 | ev.confirm_hint = confirm_hint; | 2558 | ev.confirm_hint = confirm_hint; |
2198 | put_unaligned_le32(value, &ev.value); | 2559 | put_unaligned_le32(value, &ev.value); |
2199 | 2560 | ||
2200 | return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), | 2561 | return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), |
2562 | NULL); | ||
2563 | } | ||
2564 | |||
2565 | int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr) | ||
2566 | { | ||
2567 | struct mgmt_ev_user_passkey_request ev; | ||
2568 | |||
2569 | BT_DBG("%s", hdev->name); | ||
2570 | |||
2571 | bacpy(&ev.bdaddr, bdaddr); | ||
2572 | |||
2573 | return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), | ||
2201 | NULL); | 2574 | NULL); |
2202 | } | 2575 | } |
2203 | 2576 | ||
2204 | static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, | 2577 | static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2205 | u8 opcode) | 2578 | u8 status, u8 opcode) |
2206 | { | 2579 | { |
2207 | struct pending_cmd *cmd; | 2580 | struct pending_cmd *cmd; |
2208 | struct mgmt_rp_user_confirm_reply rp; | 2581 | struct mgmt_rp_user_confirm_reply rp; |
2209 | int err; | 2582 | int err; |
2210 | 2583 | ||
2211 | cmd = mgmt_pending_find(opcode, index); | 2584 | cmd = mgmt_pending_find(opcode, hdev); |
2212 | if (!cmd) | 2585 | if (!cmd) |
2213 | return -ENOENT; | 2586 | return -ENOENT; |
2214 | 2587 | ||
2215 | bacpy(&rp.bdaddr, bdaddr); | 2588 | bacpy(&rp.bdaddr, bdaddr); |
2216 | rp.status = status; | 2589 | rp.status = mgmt_status(status); |
2217 | err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); | 2590 | err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp)); |
2218 | 2591 | ||
2219 | mgmt_pending_remove(cmd); | 2592 | mgmt_pending_remove(cmd); |
2220 | 2593 | ||
2221 | return err; | 2594 | return err; |
2222 | } | 2595 | } |
2223 | 2596 | ||
2224 | int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | 2597 | int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2598 | u8 status) | ||
2225 | { | 2599 | { |
2226 | return confirm_reply_complete(index, bdaddr, status, | 2600 | return user_pairing_resp_complete(hdev, bdaddr, status, |
2227 | MGMT_OP_USER_CONFIRM_REPLY); | 2601 | MGMT_OP_USER_CONFIRM_REPLY); |
2228 | } | 2602 | } |
2229 | 2603 | ||
2230 | int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) | 2604 | int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, |
2605 | bdaddr_t *bdaddr, u8 status) | ||
2231 | { | 2606 | { |
2232 | return confirm_reply_complete(index, bdaddr, status, | 2607 | return user_pairing_resp_complete(hdev, bdaddr, status, |
2233 | MGMT_OP_USER_CONFIRM_NEG_REPLY); | 2608 | MGMT_OP_USER_CONFIRM_NEG_REPLY); |
2234 | } | 2609 | } |
2235 | 2610 | ||
2236 | int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status) | 2611 | int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2612 | u8 status) | ||
2613 | { | ||
2614 | return user_pairing_resp_complete(hdev, bdaddr, status, | ||
2615 | MGMT_OP_USER_PASSKEY_REPLY); | ||
2616 | } | ||
2617 | |||
2618 | int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, | ||
2619 | bdaddr_t *bdaddr, u8 status) | ||
2620 | { | ||
2621 | return user_pairing_resp_complete(hdev, bdaddr, status, | ||
2622 | MGMT_OP_USER_PASSKEY_NEG_REPLY); | ||
2623 | } | ||
2624 | |||
2625 | int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) | ||
2237 | { | 2626 | { |
2238 | struct mgmt_ev_auth_failed ev; | 2627 | struct mgmt_ev_auth_failed ev; |
2239 | 2628 | ||
2240 | bacpy(&ev.bdaddr, bdaddr); | 2629 | bacpy(&ev.bdaddr, bdaddr); |
2241 | ev.status = status; | 2630 | ev.status = mgmt_status(status); |
2242 | 2631 | ||
2243 | return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); | 2632 | return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); |
2244 | } | 2633 | } |
2245 | 2634 | ||
2246 | int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status) | 2635 | int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) |
2247 | { | 2636 | { |
2248 | struct pending_cmd *cmd; | 2637 | struct pending_cmd *cmd; |
2249 | struct hci_dev *hdev; | ||
2250 | struct mgmt_cp_set_local_name ev; | 2638 | struct mgmt_cp_set_local_name ev; |
2251 | int err; | 2639 | int err; |
2252 | 2640 | ||
2253 | memset(&ev, 0, sizeof(ev)); | 2641 | memset(&ev, 0, sizeof(ev)); |
2254 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); | 2642 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); |
2255 | 2643 | ||
2256 | cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index); | 2644 | cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); |
2257 | if (!cmd) | 2645 | if (!cmd) |
2258 | goto send_event; | 2646 | goto send_event; |
2259 | 2647 | ||
2260 | if (status) { | 2648 | if (status) { |
2261 | err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO); | 2649 | err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, |
2650 | mgmt_status(status)); | ||
2262 | goto failed; | 2651 | goto failed; |
2263 | } | 2652 | } |
2264 | 2653 | ||
2265 | hdev = hci_dev_get(index); | 2654 | update_eir(hdev); |
2266 | if (hdev) { | ||
2267 | hci_dev_lock_bh(hdev); | ||
2268 | update_eir(hdev); | ||
2269 | hci_dev_unlock_bh(hdev); | ||
2270 | hci_dev_put(hdev); | ||
2271 | } | ||
2272 | 2655 | ||
2273 | err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev, | 2656 | err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev, |
2274 | sizeof(ev)); | 2657 | sizeof(ev)); |
2275 | if (err < 0) | 2658 | if (err < 0) |
2276 | goto failed; | 2659 | goto failed; |
2277 | 2660 | ||
2278 | send_event: | 2661 | send_event: |
2279 | err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev), | 2662 | err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), |
2280 | cmd ? cmd->sk : NULL); | 2663 | cmd ? cmd->sk : NULL); |
2281 | 2664 | ||
2282 | failed: | 2665 | failed: |
@@ -2285,29 +2668,31 @@ failed: | |||
2285 | return err; | 2668 | return err; |
2286 | } | 2669 | } |
2287 | 2670 | ||
2288 | int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, | 2671 | int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, |
2289 | u8 status) | 2672 | u8 *randomizer, u8 status) |
2290 | { | 2673 | { |
2291 | struct pending_cmd *cmd; | 2674 | struct pending_cmd *cmd; |
2292 | int err; | 2675 | int err; |
2293 | 2676 | ||
2294 | BT_DBG("hci%u status %u", index, status); | 2677 | BT_DBG("%s status %u", hdev->name, status); |
2295 | 2678 | ||
2296 | cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index); | 2679 | cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); |
2297 | if (!cmd) | 2680 | if (!cmd) |
2298 | return -ENOENT; | 2681 | return -ENOENT; |
2299 | 2682 | ||
2300 | if (status) { | 2683 | if (status) { |
2301 | err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, | 2684 | err = cmd_status(cmd->sk, hdev->id, |
2302 | EIO); | 2685 | MGMT_OP_READ_LOCAL_OOB_DATA, |
2686 | mgmt_status(status)); | ||
2303 | } else { | 2687 | } else { |
2304 | struct mgmt_rp_read_local_oob_data rp; | 2688 | struct mgmt_rp_read_local_oob_data rp; |
2305 | 2689 | ||
2306 | memcpy(rp.hash, hash, sizeof(rp.hash)); | 2690 | memcpy(rp.hash, hash, sizeof(rp.hash)); |
2307 | memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); | 2691 | memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); |
2308 | 2692 | ||
2309 | err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, | 2693 | err = cmd_complete(cmd->sk, hdev->id, |
2310 | &rp, sizeof(rp)); | 2694 | MGMT_OP_READ_LOCAL_OOB_DATA, |
2695 | &rp, sizeof(rp)); | ||
2311 | } | 2696 | } |
2312 | 2697 | ||
2313 | mgmt_pending_remove(cmd); | 2698 | mgmt_pending_remove(cmd); |
@@ -2315,14 +2700,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, | |||
2315 | return err; | 2700 | return err; |
2316 | } | 2701 | } |
2317 | 2702 | ||
2318 | int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, | 2703 | int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
2319 | u8 *eir) | 2704 | u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir) |
2320 | { | 2705 | { |
2321 | struct mgmt_ev_device_found ev; | 2706 | struct mgmt_ev_device_found ev; |
2322 | 2707 | ||
2323 | memset(&ev, 0, sizeof(ev)); | 2708 | memset(&ev, 0, sizeof(ev)); |
2324 | 2709 | ||
2325 | bacpy(&ev.bdaddr, bdaddr); | 2710 | bacpy(&ev.addr.bdaddr, bdaddr); |
2711 | ev.addr.type = link_to_mgmt(link_type, addr_type); | ||
2326 | ev.rssi = rssi; | 2712 | ev.rssi = rssi; |
2327 | 2713 | ||
2328 | if (eir) | 2714 | if (eir) |
@@ -2331,10 +2717,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, | |||
2331 | if (dev_class) | 2717 | if (dev_class) |
2332 | memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); | 2718 | memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); |
2333 | 2719 | ||
2334 | return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); | 2720 | return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL); |
2335 | } | 2721 | } |
2336 | 2722 | ||
2337 | int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) | 2723 | int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) |
2338 | { | 2724 | { |
2339 | struct mgmt_ev_remote_name ev; | 2725 | struct mgmt_ev_remote_name ev; |
2340 | 2726 | ||
@@ -2343,37 +2729,79 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) | |||
2343 | bacpy(&ev.bdaddr, bdaddr); | 2729 | bacpy(&ev.bdaddr, bdaddr); |
2344 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); | 2730 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); |
2345 | 2731 | ||
2346 | return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL); | 2732 | return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL); |
2733 | } | ||
2734 | |||
2735 | int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) | ||
2736 | { | ||
2737 | struct pending_cmd *cmd; | ||
2738 | int err; | ||
2739 | |||
2740 | cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); | ||
2741 | if (!cmd) | ||
2742 | return -ENOENT; | ||
2743 | |||
2744 | err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); | ||
2745 | mgmt_pending_remove(cmd); | ||
2746 | |||
2747 | return err; | ||
2748 | } | ||
2749 | |||
2750 | int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) | ||
2751 | { | ||
2752 | struct pending_cmd *cmd; | ||
2753 | int err; | ||
2754 | |||
2755 | cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); | ||
2756 | if (!cmd) | ||
2757 | return -ENOENT; | ||
2758 | |||
2759 | err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status); | ||
2760 | mgmt_pending_remove(cmd); | ||
2761 | |||
2762 | return err; | ||
2347 | } | 2763 | } |
2348 | 2764 | ||
2349 | int mgmt_discovering(u16 index, u8 discovering) | 2765 | int mgmt_discovering(struct hci_dev *hdev, u8 discovering) |
2350 | { | 2766 | { |
2351 | return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, | 2767 | struct pending_cmd *cmd; |
2768 | |||
2769 | if (discovering) | ||
2770 | cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); | ||
2771 | else | ||
2772 | cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); | ||
2773 | |||
2774 | if (cmd != NULL) { | ||
2775 | cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0); | ||
2776 | mgmt_pending_remove(cmd); | ||
2777 | } | ||
2778 | |||
2779 | return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering, | ||
2352 | sizeof(discovering), NULL); | 2780 | sizeof(discovering), NULL); |
2353 | } | 2781 | } |
2354 | 2782 | ||
2355 | int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr) | 2783 | int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr) |
2356 | { | 2784 | { |
2357 | struct pending_cmd *cmd; | 2785 | struct pending_cmd *cmd; |
2358 | struct mgmt_ev_device_blocked ev; | 2786 | struct mgmt_ev_device_blocked ev; |
2359 | 2787 | ||
2360 | cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index); | 2788 | cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev); |
2361 | 2789 | ||
2362 | bacpy(&ev.bdaddr, bdaddr); | 2790 | bacpy(&ev.bdaddr, bdaddr); |
2363 | 2791 | ||
2364 | return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev), | 2792 | return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev), |
2365 | cmd ? cmd->sk : NULL); | 2793 | cmd ? cmd->sk : NULL); |
2366 | } | 2794 | } |
2367 | 2795 | ||
2368 | int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr) | 2796 | int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr) |
2369 | { | 2797 | { |
2370 | struct pending_cmd *cmd; | 2798 | struct pending_cmd *cmd; |
2371 | struct mgmt_ev_device_unblocked ev; | 2799 | struct mgmt_ev_device_unblocked ev; |
2372 | 2800 | ||
2373 | cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index); | 2801 | cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev); |
2374 | 2802 | ||
2375 | bacpy(&ev.bdaddr, bdaddr); | 2803 | bacpy(&ev.bdaddr, bdaddr); |
2376 | 2804 | ||
2377 | return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev), | 2805 | return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev), |
2378 | cmd ? cmd->sk : NULL); | 2806 | cmd ? cmd->sk : NULL); |
2379 | } | 2807 | } |
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig index 405a0e61e7dc..22e718b554e4 100644 --- a/net/bluetooth/rfcomm/Kconfig +++ b/net/bluetooth/rfcomm/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config BT_RFCOMM | 1 | config BT_RFCOMM |
2 | tristate "RFCOMM protocol support" | 2 | tristate "RFCOMM protocol support" |
3 | depends on BT && BT_L2CAP | 3 | depends on BT |
4 | help | 4 | help |
5 | RFCOMM provides connection oriented stream transport. RFCOMM | 5 | RFCOMM provides connection oriented stream transport. RFCOMM |
6 | support is required for Dialup Networking, OBEX and other Bluetooth | 6 | support is required for Dialup Networking, OBEX and other Bluetooth |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 2d28dfe98389..501649bf5596 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -51,8 +51,8 @@ | |||
51 | 51 | ||
52 | #define VERSION "1.11" | 52 | #define VERSION "1.11" |
53 | 53 | ||
54 | static int disable_cfc; | 54 | static bool disable_cfc; |
55 | static int l2cap_ertm; | 55 | static bool l2cap_ertm; |
56 | static int channel_mtu = -1; | 56 | static int channel_mtu = -1; |
57 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; | 57 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; |
58 | 58 | ||
@@ -377,13 +377,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) | |||
377 | static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) | 377 | static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) |
378 | { | 378 | { |
379 | struct rfcomm_dlc *d; | 379 | struct rfcomm_dlc *d; |
380 | struct list_head *p; | ||
381 | 380 | ||
382 | list_for_each(p, &s->dlcs) { | 381 | list_for_each_entry(d, &s->dlcs, list) |
383 | d = list_entry(p, struct rfcomm_dlc, list); | ||
384 | if (d->dlci == dlci) | 382 | if (d->dlci == dlci) |
385 | return d; | 383 | return d; |
386 | } | 384 | |
387 | return NULL; | 385 | return NULL; |
388 | } | 386 | } |
389 | 387 | ||
@@ -751,7 +749,6 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d | |||
751 | /* ---- RFCOMM frame sending ---- */ | 749 | /* ---- RFCOMM frame sending ---- */ |
752 | static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) | 750 | static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) |
753 | { | 751 | { |
754 | struct socket *sock = s->sock; | ||
755 | struct kvec iv = { data, len }; | 752 | struct kvec iv = { data, len }; |
756 | struct msghdr msg; | 753 | struct msghdr msg; |
757 | 754 | ||
@@ -759,7 +756,14 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) | |||
759 | 756 | ||
760 | memset(&msg, 0, sizeof(msg)); | 757 | memset(&msg, 0, sizeof(msg)); |
761 | 758 | ||
762 | return kernel_sendmsg(sock, &msg, &iv, 1, len); | 759 | return kernel_sendmsg(s->sock, &msg, &iv, 1, len); |
760 | } | ||
761 | |||
762 | static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd) | ||
763 | { | ||
764 | BT_DBG("%p cmd %u", s, cmd->ctrl); | ||
765 | |||
766 | return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd)); | ||
763 | } | 767 | } |
764 | 768 | ||
765 | static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) | 769 | static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) |
@@ -773,7 +777,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) | |||
773 | cmd.len = __len8(0); | 777 | cmd.len = __len8(0); |
774 | cmd.fcs = __fcs2((u8 *) &cmd); | 778 | cmd.fcs = __fcs2((u8 *) &cmd); |
775 | 779 | ||
776 | return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); | 780 | return rfcomm_send_cmd(s, &cmd); |
777 | } | 781 | } |
778 | 782 | ||
779 | static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) | 783 | static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) |
@@ -787,7 +791,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) | |||
787 | cmd.len = __len8(0); | 791 | cmd.len = __len8(0); |
788 | cmd.fcs = __fcs2((u8 *) &cmd); | 792 | cmd.fcs = __fcs2((u8 *) &cmd); |
789 | 793 | ||
790 | return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); | 794 | return rfcomm_send_cmd(s, &cmd); |
791 | } | 795 | } |
792 | 796 | ||
793 | static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) | 797 | static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) |
@@ -801,7 +805,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) | |||
801 | cmd.len = __len8(0); | 805 | cmd.len = __len8(0); |
802 | cmd.fcs = __fcs2((u8 *) &cmd); | 806 | cmd.fcs = __fcs2((u8 *) &cmd); |
803 | 807 | ||
804 | return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); | 808 | return rfcomm_send_cmd(s, &cmd); |
805 | } | 809 | } |
806 | 810 | ||
807 | static int rfcomm_queue_disc(struct rfcomm_dlc *d) | 811 | static int rfcomm_queue_disc(struct rfcomm_dlc *d) |
@@ -837,7 +841,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) | |||
837 | cmd.len = __len8(0); | 841 | cmd.len = __len8(0); |
838 | cmd.fcs = __fcs2((u8 *) &cmd); | 842 | cmd.fcs = __fcs2((u8 *) &cmd); |
839 | 843 | ||
840 | return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); | 844 | return rfcomm_send_cmd(s, &cmd); |
841 | } | 845 | } |
842 | 846 | ||
843 | static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) | 847 | static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) |
@@ -2121,15 +2125,13 @@ static struct hci_cb rfcomm_cb = { | |||
2121 | static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) | 2125 | static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) |
2122 | { | 2126 | { |
2123 | struct rfcomm_session *s; | 2127 | struct rfcomm_session *s; |
2124 | struct list_head *pp, *p; | ||
2125 | 2128 | ||
2126 | rfcomm_lock(); | 2129 | rfcomm_lock(); |
2127 | 2130 | ||
2128 | list_for_each(p, &session_list) { | 2131 | list_for_each_entry(s, &session_list, list) { |
2129 | s = list_entry(p, struct rfcomm_session, list); | 2132 | struct rfcomm_dlc *d; |
2130 | list_for_each(pp, &s->dlcs) { | 2133 | list_for_each_entry(d, &s->dlcs, list) { |
2131 | struct sock *sk = s->sock->sk; | 2134 | struct sock *sk = s->sock->sk; |
2132 | struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); | ||
2133 | 2135 | ||
2134 | seq_printf(f, "%s %s %ld %d %d %d %d\n", | 2136 | seq_printf(f, "%s %s %ld %d %d %d %d\n", |
2135 | batostr(&bt_sk(sk)->src), | 2137 | batostr(&bt_sk(sk)->src), |
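
The rfcomm/core.c hunks above do two things: the identical SABM/UA/DISC/DM senders now go through a common rfcomm_send_cmd() wrapper instead of each calling rfcomm_send_frame() with a cast, and the open-coded list_for_each()/list_entry() walks become list_for_each_entry(). A minimal sketch of that iteration idiom, assuming only the standard <linux/list.h> API (the struct and function names here are illustrative, not part of the patch):

#include <linux/list.h>

/* Illustrative object linked into a list; not from the patch. */
struct example_session {
	struct list_head list;
	int id;
};

static LIST_HEAD(example_list);

static struct example_session *example_lookup(int id)
{
	struct example_session *s;

	/*
	 * list_for_each_entry() folds the old list_for_each() +
	 * list_entry() pair into one step, so the separate
	 * "struct list_head *p" cursor disappears, as in
	 * __rfcomm_dev_get() and rfcomm_dlc_debugfs_show() above.
	 */
	list_for_each_entry(s, &example_list, list)
		if (s->id == id)
			return s;

	return NULL;
}
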
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 5417f6127323..aea2bdd1510f 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
600 | break; | 600 | break; |
601 | } | 601 | } |
602 | 602 | ||
603 | skb->priority = sk->sk_priority; | ||
604 | |||
603 | err = rfcomm_dlc_send(d, skb); | 605 | err = rfcomm_dlc_send(d, skb); |
604 | if (err < 0) { | 606 | if (err < 0) { |
605 | kfree_skb(skb); | 607 | kfree_skb(skb); |
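
The single sock.c change stamps each outgoing skb with the socket's sk_priority, so the reworked HCI/L2CAP transmit path elsewhere in this series can prefer higher-priority traffic, much as smp.c below sends its PDUs at HCI_PRIO_MAX. From userspace that priority is the ordinary SO_PRIORITY socket option; a minimal sketch, with placeholder address and channel (requires the BlueZ development headers):

#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(void)
{
	struct sockaddr_rc addr = { 0 };
	int prio = 6, fd;

	fd = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);
	if (fd < 0)
		return 1;

	/* becomes sk->sk_priority, mirrored into every skb we send */
	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));

	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;				/* placeholder */
	str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);	/* placeholder */

	if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		perror("connect");

	return 0;
}
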
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index c258796313e0..fa8f4de53b99 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/capability.h> | 34 | #include <linux/capability.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/skbuff.h> | 36 | #include <linux/skbuff.h> |
37 | #include <linux/workqueue.h> | ||
37 | 38 | ||
38 | #include <net/bluetooth/bluetooth.h> | 39 | #include <net/bluetooth/bluetooth.h> |
39 | #include <net/bluetooth/hci_core.h> | 40 | #include <net/bluetooth/hci_core.h> |
@@ -65,7 +66,7 @@ struct rfcomm_dev { | |||
65 | struct rfcomm_dlc *dlc; | 66 | struct rfcomm_dlc *dlc; |
66 | struct tty_struct *tty; | 67 | struct tty_struct *tty; |
67 | wait_queue_head_t wait; | 68 | wait_queue_head_t wait; |
68 | struct tasklet_struct wakeup_task; | 69 | struct work_struct wakeup_task; |
69 | 70 | ||
70 | struct device *tty_dev; | 71 | struct device *tty_dev; |
71 | 72 | ||
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); | |||
81 | static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); | 82 | static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); |
82 | static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); | 83 | static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); |
83 | 84 | ||
84 | static void rfcomm_tty_wakeup(unsigned long arg); | 85 | static void rfcomm_tty_wakeup(struct work_struct *work); |
85 | 86 | ||
86 | /* ---- Device functions ---- */ | 87 | /* ---- Device functions ---- */ |
87 | static void rfcomm_dev_destruct(struct rfcomm_dev *dev) | 88 | static void rfcomm_dev_destruct(struct rfcomm_dev *dev) |
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev) | |||
133 | static struct rfcomm_dev *__rfcomm_dev_get(int id) | 134 | static struct rfcomm_dev *__rfcomm_dev_get(int id) |
134 | { | 135 | { |
135 | struct rfcomm_dev *dev; | 136 | struct rfcomm_dev *dev; |
136 | struct list_head *p; | ||
137 | 137 | ||
138 | list_for_each(p, &rfcomm_dev_list) { | 138 | list_for_each_entry(dev, &rfcomm_dev_list, list) |
139 | dev = list_entry(p, struct rfcomm_dev, list); | ||
140 | if (dev->id == id) | 139 | if (dev->id == id) |
141 | return dev; | 140 | return dev; |
142 | } | ||
143 | 141 | ||
144 | return NULL; | 142 | return NULL; |
145 | } | 143 | } |
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); | |||
197 | 195 | ||
198 | static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | 196 | static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) |
199 | { | 197 | { |
200 | struct rfcomm_dev *dev; | 198 | struct rfcomm_dev *dev, *entry; |
201 | struct list_head *head = &rfcomm_dev_list, *p; | 199 | struct list_head *head = &rfcomm_dev_list, *p; |
202 | int err = 0; | 200 | int err = 0; |
203 | 201 | ||
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | |||
212 | if (req->dev_id < 0) { | 210 | if (req->dev_id < 0) { |
213 | dev->id = 0; | 211 | dev->id = 0; |
214 | 212 | ||
215 | list_for_each(p, &rfcomm_dev_list) { | 213 | list_for_each_entry(entry, &rfcomm_dev_list, list) { |
216 | if (list_entry(p, struct rfcomm_dev, list)->id != dev->id) | 214 | if (entry->id != dev->id) |
217 | break; | 215 | break; |
218 | 216 | ||
219 | dev->id++; | 217 | dev->id++; |
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | |||
222 | } else { | 220 | } else { |
223 | dev->id = req->dev_id; | 221 | dev->id = req->dev_id; |
224 | 222 | ||
225 | list_for_each(p, &rfcomm_dev_list) { | 223 | list_for_each_entry(entry, &rfcomm_dev_list, list) { |
226 | struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list); | ||
227 | |||
228 | if (entry->id == dev->id) { | 224 | if (entry->id == dev->id) { |
229 | err = -EADDRINUSE; | 225 | err = -EADDRINUSE; |
230 | goto out; | 226 | goto out; |
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) | |||
257 | atomic_set(&dev->opened, 0); | 253 | atomic_set(&dev->opened, 0); |
258 | 254 | ||
259 | init_waitqueue_head(&dev->wait); | 255 | init_waitqueue_head(&dev->wait); |
260 | tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); | 256 | INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup); |
261 | 257 | ||
262 | skb_queue_head_init(&dev->pending); | 258 | skb_queue_head_init(&dev->pending); |
263 | 259 | ||
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb) | |||
351 | struct rfcomm_dev *dev = (void *) skb->sk; | 347 | struct rfcomm_dev *dev = (void *) skb->sk; |
352 | atomic_sub(skb->truesize, &dev->wmem_alloc); | 348 | atomic_sub(skb->truesize, &dev->wmem_alloc); |
353 | if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) | 349 | if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) |
354 | tasklet_schedule(&dev->wakeup_task); | 350 | queue_work(system_nrt_wq, &dev->wakeup_task); |
355 | rfcomm_dev_put(dev); | 351 | rfcomm_dev_put(dev); |
356 | } | 352 | } |
357 | 353 | ||
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg) | |||
455 | 451 | ||
456 | static int rfcomm_get_dev_list(void __user *arg) | 452 | static int rfcomm_get_dev_list(void __user *arg) |
457 | { | 453 | { |
454 | struct rfcomm_dev *dev; | ||
458 | struct rfcomm_dev_list_req *dl; | 455 | struct rfcomm_dev_list_req *dl; |
459 | struct rfcomm_dev_info *di; | 456 | struct rfcomm_dev_info *di; |
460 | struct list_head *p; | ||
461 | int n = 0, size, err; | 457 | int n = 0, size, err; |
462 | u16 dev_num; | 458 | u16 dev_num; |
463 | 459 | ||
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg) | |||
479 | 475 | ||
480 | read_lock_bh(&rfcomm_dev_lock); | 476 | read_lock_bh(&rfcomm_dev_lock); |
481 | 477 | ||
482 | list_for_each(p, &rfcomm_dev_list) { | 478 | list_for_each_entry(dev, &rfcomm_dev_list, list) { |
483 | struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); | ||
484 | if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) | 479 | if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) |
485 | continue; | 480 | continue; |
486 | (di + n)->id = dev->id; | 481 | (di + n)->id = dev->id; |
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) | |||
635 | } | 630 | } |
636 | 631 | ||
637 | /* ---- TTY functions ---- */ | 632 | /* ---- TTY functions ---- */ |
638 | static void rfcomm_tty_wakeup(unsigned long arg) | 633 | static void rfcomm_tty_wakeup(struct work_struct *work) |
639 | { | 634 | { |
640 | struct rfcomm_dev *dev = (void *) arg; | 635 | struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev, |
636 | wakeup_task); | ||
641 | struct tty_struct *tty = dev->tty; | 637 | struct tty_struct *tty = dev->tty; |
642 | if (!tty) | 638 | if (!tty) |
643 | return; | 639 | return; |
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) | |||
762 | rfcomm_dlc_close(dev->dlc, 0); | 758 | rfcomm_dlc_close(dev->dlc, 0); |
763 | 759 | ||
764 | clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); | 760 | clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); |
765 | tasklet_kill(&dev->wakeup_task); | 761 | cancel_work_sync(&dev->wakeup_task); |
766 | 762 | ||
767 | rfcomm_dlc_lock(dev->dlc); | 763 | rfcomm_dlc_lock(dev->dlc); |
768 | tty->driver_data = NULL; | 764 | tty->driver_data = NULL; |
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = { | |||
1155 | 1151 | ||
1156 | int __init rfcomm_init_ttys(void) | 1152 | int __init rfcomm_init_ttys(void) |
1157 | { | 1153 | { |
1154 | int error; | ||
1155 | |||
1158 | rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); | 1156 | rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); |
1159 | if (!rfcomm_tty_driver) | 1157 | if (!rfcomm_tty_driver) |
1160 | return -1; | 1158 | return -ENOMEM; |
1161 | 1159 | ||
1162 | rfcomm_tty_driver->owner = THIS_MODULE; | 1160 | rfcomm_tty_driver->owner = THIS_MODULE; |
1163 | rfcomm_tty_driver->driver_name = "rfcomm"; | 1161 | rfcomm_tty_driver->driver_name = "rfcomm"; |
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void) | |||
1172 | rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; | 1170 | rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; |
1173 | tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); | 1171 | tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); |
1174 | 1172 | ||
1175 | if (tty_register_driver(rfcomm_tty_driver)) { | 1173 | error = tty_register_driver(rfcomm_tty_driver); |
1174 | if (error) { | ||
1176 | BT_ERR("Can't register RFCOMM TTY driver"); | 1175 | BT_ERR("Can't register RFCOMM TTY driver"); |
1177 | put_tty_driver(rfcomm_tty_driver); | 1176 | put_tty_driver(rfcomm_tty_driver); |
1178 | return -1; | 1177 | return error; |
1179 | } | 1178 | } |
1180 | 1179 | ||
1181 | BT_INFO("RFCOMM TTY layer initialized"); | 1180 | BT_INFO("RFCOMM TTY layer initialized"); |
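
The tty.c hunks retire the wakeup tasklet in favour of a work item: the handler now takes a struct work_struct * and recovers its device with container_of(), scheduling goes through queue_work(system_nrt_wq, ...), and teardown uses cancel_work_sync(), which, unlike tasklet_kill(), may sleep and guarantees the handler has finished. A condensed sketch of the same conversion pattern, assuming the standard <linux/workqueue.h> API (example_dev and its helpers are illustrative, not from the patch; the patch itself queues on system_nrt_wq rather than the default workqueue used here):

#include <linux/workqueue.h>

struct example_dev {
	struct work_struct wakeup_task;
	/* ... driver state ... */
};

/* New-style handler: context comes from container_of(), not from an
 * (unsigned long) argument as with tasklets. */
static void example_wakeup(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       wakeup_task);

	/* deferred wakeup handling for dev goes here */
	(void) dev;
}

static void example_init(struct example_dev *dev)
{
	INIT_WORK(&dev->wakeup_task, example_wakeup);	/* was tasklet_init() */
}

static void example_kick(struct example_dev *dev)
{
	schedule_work(&dev->wakeup_task);		/* was tasklet_schedule() */
}

static void example_shutdown(struct example_dev *dev)
{
	cancel_work_sync(&dev->wakeup_task);	/* was tasklet_kill(); may sleep */
}
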
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index a324b009e34b..5dc2f2126fac 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include <net/bluetooth/hci_core.h> | 51 | #include <net/bluetooth/hci_core.h> |
52 | #include <net/bluetooth/sco.h> | 52 | #include <net/bluetooth/sco.h> |
53 | 53 | ||
54 | static int disable_esco; | 54 | static bool disable_esco; |
55 | 55 | ||
56 | static const struct proto_ops sco_sock_ops; | 56 | static const struct proto_ops sco_sock_ops; |
57 | 57 | ||
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk) | |||
189 | if (!hdev) | 189 | if (!hdev) |
190 | return -EHOSTUNREACH; | 190 | return -EHOSTUNREACH; |
191 | 191 | ||
192 | hci_dev_lock_bh(hdev); | 192 | hci_dev_lock(hdev); |
193 | 193 | ||
194 | if (lmp_esco_capable(hdev) && !disable_esco) | 194 | if (lmp_esco_capable(hdev) && !disable_esco) |
195 | type = ESCO_LINK; | 195 | type = ESCO_LINK; |
@@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk) | |||
225 | } | 225 | } |
226 | 226 | ||
227 | done: | 227 | done: |
228 | hci_dev_unlock_bh(hdev); | 228 | hci_dev_unlock(hdev); |
229 | hci_dev_put(hdev); | 229 | hci_dev_put(hdev); |
230 | return err; | 230 | return err; |
231 | } | 231 | } |
@@ -893,15 +893,12 @@ done: | |||
893 | } | 893 | } |
894 | 894 | ||
895 | /* ----- SCO interface with lower layer (HCI) ----- */ | 895 | /* ----- SCO interface with lower layer (HCI) ----- */ |
896 | static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) | 896 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) |
897 | { | 897 | { |
898 | register struct sock *sk; | 898 | register struct sock *sk; |
899 | struct hlist_node *node; | 899 | struct hlist_node *node; |
900 | int lm = 0; | 900 | int lm = 0; |
901 | 901 | ||
902 | if (type != SCO_LINK && type != ESCO_LINK) | ||
903 | return -EINVAL; | ||
904 | |||
905 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); | 902 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); |
906 | 903 | ||
907 | /* Find listening sockets */ | 904 | /* Find listening sockets */ |
@@ -921,13 +918,9 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) | |||
921 | return lm; | 918 | return lm; |
922 | } | 919 | } |
923 | 920 | ||
924 | static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) | 921 | int sco_connect_cfm(struct hci_conn *hcon, __u8 status) |
925 | { | 922 | { |
926 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 923 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); |
927 | |||
928 | if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) | ||
929 | return -EINVAL; | ||
930 | |||
931 | if (!status) { | 924 | if (!status) { |
932 | struct sco_conn *conn; | 925 | struct sco_conn *conn; |
933 | 926 | ||
@@ -940,19 +933,15 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) | |||
940 | return 0; | 933 | return 0; |
941 | } | 934 | } |
942 | 935 | ||
943 | static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) | 936 | int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) |
944 | { | 937 | { |
945 | BT_DBG("hcon %p reason %d", hcon, reason); | 938 | BT_DBG("hcon %p reason %d", hcon, reason); |
946 | 939 | ||
947 | if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) | ||
948 | return -EINVAL; | ||
949 | |||
950 | sco_conn_del(hcon, bt_to_errno(reason)); | 940 | sco_conn_del(hcon, bt_to_errno(reason)); |
951 | |||
952 | return 0; | 941 | return 0; |
953 | } | 942 | } |
954 | 943 | ||
955 | static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) | 944 | int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) |
956 | { | 945 | { |
957 | struct sco_conn *conn = hcon->sco_data; | 946 | struct sco_conn *conn = hcon->sco_data; |
958 | 947 | ||
@@ -1028,15 +1017,6 @@ static const struct net_proto_family sco_sock_family_ops = { | |||
1028 | .create = sco_sock_create, | 1017 | .create = sco_sock_create, |
1029 | }; | 1018 | }; |
1030 | 1019 | ||
1031 | static struct hci_proto sco_hci_proto = { | ||
1032 | .name = "SCO", | ||
1033 | .id = HCI_PROTO_SCO, | ||
1034 | .connect_ind = sco_connect_ind, | ||
1035 | .connect_cfm = sco_connect_cfm, | ||
1036 | .disconn_cfm = sco_disconn_cfm, | ||
1037 | .recv_scodata = sco_recv_scodata | ||
1038 | }; | ||
1039 | |||
1040 | int __init sco_init(void) | 1020 | int __init sco_init(void) |
1041 | { | 1021 | { |
1042 | int err; | 1022 | int err; |
@@ -1051,13 +1031,6 @@ int __init sco_init(void) | |||
1051 | goto error; | 1031 | goto error; |
1052 | } | 1032 | } |
1053 | 1033 | ||
1054 | err = hci_register_proto(&sco_hci_proto); | ||
1055 | if (err < 0) { | ||
1056 | BT_ERR("SCO protocol registration failed"); | ||
1057 | bt_sock_unregister(BTPROTO_SCO); | ||
1058 | goto error; | ||
1059 | } | ||
1060 | |||
1061 | if (bt_debugfs) { | 1034 | if (bt_debugfs) { |
1062 | sco_debugfs = debugfs_create_file("sco", 0444, | 1035 | sco_debugfs = debugfs_create_file("sco", 0444, |
1063 | bt_debugfs, NULL, &sco_debugfs_fops); | 1036 | bt_debugfs, NULL, &sco_debugfs_fops); |
@@ -1081,9 +1054,6 @@ void __exit sco_exit(void) | |||
1081 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 1054 | if (bt_sock_unregister(BTPROTO_SCO) < 0) |
1082 | BT_ERR("SCO socket unregistration failed"); | 1055 | BT_ERR("SCO socket unregistration failed"); |
1083 | 1056 | ||
1084 | if (hci_unregister_proto(&sco_hci_proto) < 0) | ||
1085 | BT_ERR("SCO protocol unregistration failed"); | ||
1086 | |||
1087 | proto_unregister(&sco_proto); | 1057 | proto_unregister(&sco_proto); |
1088 | } | 1058 | } |
1089 | 1059 | ||
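
With the struct hci_proto table gone, the SCO hooks lose their static qualifier, the register/unregister calls disappear, and the per-hook SCO_LINK/ESCO_LINK checks move out of sco.c; the HCI core (its hci_core.c/hci_event.c changes are part of this series but not shown in this excerpt) is expected to dispatch to L2CAP or SCO directly by link type. A hedged sketch of what such a direct dispatcher looks like, not taken from this excerpt:

/* Hedged sketch only: the real helper lives in the HCI core headers,
 * outside this excerpt. */
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type)
{
	switch (type) {
	case ACL_LINK:
	case LE_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}
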
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 759b63572641..32c47de30344 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <net/bluetooth/bluetooth.h> | 23 | #include <net/bluetooth/bluetooth.h> |
24 | #include <net/bluetooth/hci_core.h> | 24 | #include <net/bluetooth/hci_core.h> |
25 | #include <net/bluetooth/l2cap.h> | 25 | #include <net/bluetooth/l2cap.h> |
26 | #include <net/bluetooth/mgmt.h> | ||
26 | #include <net/bluetooth/smp.h> | 27 | #include <net/bluetooth/smp.h> |
27 | #include <linux/crypto.h> | 28 | #include <linux/crypto.h> |
28 | #include <linux/scatterlist.h> | 29 | #include <linux/scatterlist.h> |
@@ -181,30 +182,53 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) | |||
181 | if (!skb) | 182 | if (!skb) |
182 | return; | 183 | return; |
183 | 184 | ||
184 | hci_send_acl(conn->hcon, skb, 0); | 185 | skb->priority = HCI_PRIO_MAX; |
186 | hci_send_acl(conn->hchan, skb, 0); | ||
185 | 187 | ||
186 | mod_timer(&conn->security_timer, jiffies + | 188 | cancel_delayed_work_sync(&conn->security_timer); |
189 | schedule_delayed_work(&conn->security_timer, | ||
187 | msecs_to_jiffies(SMP_TIMEOUT)); | 190 | msecs_to_jiffies(SMP_TIMEOUT)); |
188 | } | 191 | } |
189 | 192 | ||
193 | static __u8 authreq_to_seclevel(__u8 authreq) | ||
194 | { | ||
195 | if (authreq & SMP_AUTH_MITM) | ||
196 | return BT_SECURITY_HIGH; | ||
197 | else | ||
198 | return BT_SECURITY_MEDIUM; | ||
199 | } | ||
200 | |||
201 | static __u8 seclevel_to_authreq(__u8 sec_level) | ||
202 | { | ||
203 | switch (sec_level) { | ||
204 | case BT_SECURITY_HIGH: | ||
205 | return SMP_AUTH_MITM | SMP_AUTH_BONDING; | ||
206 | case BT_SECURITY_MEDIUM: | ||
207 | return SMP_AUTH_BONDING; | ||
208 | default: | ||
209 | return SMP_AUTH_NONE; | ||
210 | } | ||
211 | } | ||
212 | |||
190 | static void build_pairing_cmd(struct l2cap_conn *conn, | 213 | static void build_pairing_cmd(struct l2cap_conn *conn, |
191 | struct smp_cmd_pairing *req, | 214 | struct smp_cmd_pairing *req, |
192 | struct smp_cmd_pairing *rsp, | 215 | struct smp_cmd_pairing *rsp, |
193 | __u8 authreq) | 216 | __u8 authreq) |
194 | { | 217 | { |
195 | u8 dist_keys; | 218 | u8 dist_keys = 0; |
196 | 219 | ||
197 | dist_keys = 0; | ||
198 | if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { | 220 | if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { |
199 | dist_keys = SMP_DIST_ENC_KEY; | 221 | dist_keys = SMP_DIST_ENC_KEY; |
200 | authreq |= SMP_AUTH_BONDING; | 222 | authreq |= SMP_AUTH_BONDING; |
223 | } else { | ||
224 | authreq &= ~SMP_AUTH_BONDING; | ||
201 | } | 225 | } |
202 | 226 | ||
203 | if (rsp == NULL) { | 227 | if (rsp == NULL) { |
204 | req->io_capability = conn->hcon->io_capability; | 228 | req->io_capability = conn->hcon->io_capability; |
205 | req->oob_flag = SMP_OOB_NOT_PRESENT; | 229 | req->oob_flag = SMP_OOB_NOT_PRESENT; |
206 | req->max_key_size = SMP_MAX_ENC_KEY_SIZE; | 230 | req->max_key_size = SMP_MAX_ENC_KEY_SIZE; |
207 | req->init_key_dist = dist_keys; | 231 | req->init_key_dist = 0; |
208 | req->resp_key_dist = dist_keys; | 232 | req->resp_key_dist = dist_keys; |
209 | req->auth_req = authreq; | 233 | req->auth_req = authreq; |
210 | return; | 234 | return; |
@@ -213,7 +237,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, | |||
213 | rsp->io_capability = conn->hcon->io_capability; | 237 | rsp->io_capability = conn->hcon->io_capability; |
214 | rsp->oob_flag = SMP_OOB_NOT_PRESENT; | 238 | rsp->oob_flag = SMP_OOB_NOT_PRESENT; |
215 | rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; | 239 | rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE; |
216 | rsp->init_key_dist = req->init_key_dist & dist_keys; | 240 | rsp->init_key_dist = 0; |
217 | rsp->resp_key_dist = req->resp_key_dist & dist_keys; | 241 | rsp->resp_key_dist = req->resp_key_dist & dist_keys; |
218 | rsp->auth_req = authreq; | 242 | rsp->auth_req = authreq; |
219 | } | 243 | } |
@@ -231,6 +255,107 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) | |||
231 | return 0; | 255 | return 0; |
232 | } | 256 | } |
233 | 257 | ||
258 | static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) | ||
259 | { | ||
260 | if (send) | ||
261 | smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), | ||
262 | &reason); | ||
263 | |||
264 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend); | ||
265 | mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason); | ||
266 | cancel_delayed_work_sync(&conn->security_timer); | ||
267 | smp_chan_destroy(conn); | ||
268 | } | ||
269 | |||
270 | #define JUST_WORKS 0x00 | ||
271 | #define JUST_CFM 0x01 | ||
272 | #define REQ_PASSKEY 0x02 | ||
273 | #define CFM_PASSKEY 0x03 | ||
274 | #define REQ_OOB 0x04 | ||
275 | #define OVERLAP 0xFF | ||
276 | |||
277 | static const u8 gen_method[5][5] = { | ||
278 | { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, | ||
279 | { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, | ||
280 | { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, | ||
281 | { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, | ||
282 | { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, | ||
283 | }; | ||
284 | |||
285 | static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, | ||
286 | u8 local_io, u8 remote_io) | ||
287 | { | ||
288 | struct hci_conn *hcon = conn->hcon; | ||
289 | struct smp_chan *smp = conn->smp_chan; | ||
290 | u8 method; | ||
291 | u32 passkey = 0; | ||
292 | int ret = 0; | ||
293 | |||
294 | /* Initialize key for JUST WORKS */ | ||
295 | memset(smp->tk, 0, sizeof(smp->tk)); | ||
296 | clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); | ||
297 | |||
298 | BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); | ||
299 | |||
300 | /* If neither side wants MITM, use JUST WORKS */ | ||
301 | /* If either side has unknown io_caps, use JUST WORKS */ | ||
302 | /* Otherwise, look up method from the table */ | ||
303 | if (!(auth & SMP_AUTH_MITM) || | ||
304 | local_io > SMP_IO_KEYBOARD_DISPLAY || | ||
305 | remote_io > SMP_IO_KEYBOARD_DISPLAY) | ||
306 | method = JUST_WORKS; | ||
307 | else | ||
308 | method = gen_method[local_io][remote_io]; | ||
309 | |||
310 | /* If not bonding, don't ask user to confirm a Zero TK */ | ||
311 | if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) | ||
312 | method = JUST_WORKS; | ||
313 | |||
314 | /* If Just Works, Continue with Zero TK */ | ||
315 | if (method == JUST_WORKS) { | ||
316 | set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); | ||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | /* Not Just Works/Confirm results in MITM Authentication */ | ||
321 | if (method != JUST_CFM) | ||
322 | set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); | ||
323 | |||
324 | /* If both devices have Keyboard-Display I/O, the master | ||
325 | * Confirms and the slave Enters the passkey. | ||
326 | */ | ||
327 | if (method == OVERLAP) { | ||
328 | if (hcon->link_mode & HCI_LM_MASTER) | ||
329 | method = CFM_PASSKEY; | ||
330 | else | ||
331 | method = REQ_PASSKEY; | ||
332 | } | ||
333 | |||
334 | /* Generate random passkey. Not valid until confirmed. */ | ||
335 | if (method == CFM_PASSKEY) { | ||
336 | u8 key[16]; | ||
337 | |||
338 | memset(key, 0, sizeof(key)); | ||
339 | get_random_bytes(&passkey, sizeof(passkey)); | ||
340 | passkey %= 1000000; | ||
341 | put_unaligned_le32(passkey, key); | ||
342 | swap128(key, smp->tk); | ||
343 | BT_DBG("PassKey: %d", passkey); | ||
344 | } | ||
345 | |||
346 | hci_dev_lock(hcon->hdev); | ||
347 | |||
348 | if (method == REQ_PASSKEY) | ||
349 | ret = mgmt_user_passkey_request(hcon->hdev, conn->dst); | ||
350 | else | ||
351 | ret = mgmt_user_confirm_request(hcon->hdev, conn->dst, | ||
352 | cpu_to_le32(passkey), 0); | ||
353 | |||
354 | hci_dev_unlock(hcon->hdev); | ||
355 | |||
356 | return ret; | ||
357 | } | ||
358 | |||
234 | static void confirm_work(struct work_struct *work) | 359 | static void confirm_work(struct work_struct *work) |
235 | { | 360 | { |
236 | struct smp_chan *smp = container_of(work, struct smp_chan, confirm); | 361 | struct smp_chan *smp = container_of(work, struct smp_chan, confirm); |
@@ -263,14 +388,15 @@ static void confirm_work(struct work_struct *work) | |||
263 | goto error; | 388 | goto error; |
264 | } | 389 | } |
265 | 390 | ||
391 | clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); | ||
392 | |||
266 | swap128(res, cp.confirm_val); | 393 | swap128(res, cp.confirm_val); |
267 | smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); | 394 | smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); |
268 | 395 | ||
269 | return; | 396 | return; |
270 | 397 | ||
271 | error: | 398 | error: |
272 | smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); | 399 | smp_failure(conn, reason, 1); |
273 | smp_chan_destroy(conn); | ||
274 | } | 400 | } |
275 | 401 | ||
276 | static void random_work(struct work_struct *work) | 402 | static void random_work(struct work_struct *work) |
@@ -353,8 +479,7 @@ static void random_work(struct work_struct *work) | |||
353 | return; | 479 | return; |
354 | 480 | ||
355 | error: | 481 | error: |
356 | smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); | 482 | smp_failure(conn, reason, 1); |
357 | smp_chan_destroy(conn); | ||
358 | } | 483 | } |
359 | 484 | ||
360 | static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) | 485 | static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) |
@@ -370,6 +495,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) | |||
370 | 495 | ||
371 | smp->conn = conn; | 496 | smp->conn = conn; |
372 | conn->smp_chan = smp; | 497 | conn->smp_chan = smp; |
498 | conn->hcon->smp_conn = conn; | ||
373 | 499 | ||
374 | hci_conn_hold(conn->hcon); | 500 | hci_conn_hold(conn->hcon); |
375 | 501 | ||
@@ -378,19 +504,73 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) | |||
378 | 504 | ||
379 | void smp_chan_destroy(struct l2cap_conn *conn) | 505 | void smp_chan_destroy(struct l2cap_conn *conn) |
380 | { | 506 | { |
381 | kfree(conn->smp_chan); | 507 | struct smp_chan *smp = conn->smp_chan; |
508 | |||
509 | clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); | ||
510 | |||
511 | if (smp->tfm) | ||
512 | crypto_free_blkcipher(smp->tfm); | ||
513 | |||
514 | kfree(smp); | ||
515 | conn->smp_chan = NULL; | ||
516 | conn->hcon->smp_conn = NULL; | ||
382 | hci_conn_put(conn->hcon); | 517 | hci_conn_put(conn->hcon); |
383 | } | 518 | } |
384 | 519 | ||
520 | int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) | ||
521 | { | ||
522 | struct l2cap_conn *conn = hcon->smp_conn; | ||
523 | struct smp_chan *smp; | ||
524 | u32 value; | ||
525 | u8 key[16]; | ||
526 | |||
527 | BT_DBG(""); | ||
528 | |||
529 | if (!conn) | ||
530 | return -ENOTCONN; | ||
531 | |||
532 | smp = conn->smp_chan; | ||
533 | |||
534 | switch (mgmt_op) { | ||
535 | case MGMT_OP_USER_PASSKEY_REPLY: | ||
536 | value = le32_to_cpu(passkey); | ||
537 | memset(key, 0, sizeof(key)); | ||
538 | BT_DBG("PassKey: %d", value); | ||
539 | put_unaligned_le32(value, key); | ||
540 | swap128(key, smp->tk); | ||
541 | /* Fall Through */ | ||
542 | case MGMT_OP_USER_CONFIRM_REPLY: | ||
543 | set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); | ||
544 | break; | ||
545 | case MGMT_OP_USER_PASSKEY_NEG_REPLY: | ||
546 | case MGMT_OP_USER_CONFIRM_NEG_REPLY: | ||
547 | smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); | ||
548 | return 0; | ||
549 | default: | ||
550 | smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1); | ||
551 | return -EOPNOTSUPP; | ||
552 | } | ||
553 | |||
554 | /* If it is our turn to send Pairing Confirm, do so now */ | ||
555 | if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags)) | ||
556 | queue_work(hcon->hdev->workqueue, &smp->confirm); | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
385 | static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | 561 | static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) |
386 | { | 562 | { |
387 | struct smp_cmd_pairing rsp, *req = (void *) skb->data; | 563 | struct smp_cmd_pairing rsp, *req = (void *) skb->data; |
388 | struct smp_chan *smp; | 564 | struct smp_chan *smp; |
389 | u8 key_size; | 565 | u8 key_size; |
566 | u8 auth = SMP_AUTH_NONE; | ||
390 | int ret; | 567 | int ret; |
391 | 568 | ||
392 | BT_DBG("conn %p", conn); | 569 | BT_DBG("conn %p", conn); |
393 | 570 | ||
571 | if (conn->hcon->link_mode & HCI_LM_MASTER) | ||
572 | return SMP_CMD_NOTSUPP; | ||
573 | |||
394 | if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) | 574 | if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) |
395 | smp = smp_chan_create(conn); | 575 | smp = smp_chan_create(conn); |
396 | 576 | ||
@@ -400,19 +580,16 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
400 | memcpy(&smp->preq[1], req, sizeof(*req)); | 580 | memcpy(&smp->preq[1], req, sizeof(*req)); |
401 | skb_pull(skb, sizeof(*req)); | 581 | skb_pull(skb, sizeof(*req)); |
402 | 582 | ||
403 | if (req->oob_flag) | 583 | /* We didn't start the pairing, so match remote */ |
404 | return SMP_OOB_NOT_AVAIL; | 584 | if (req->auth_req & SMP_AUTH_BONDING) |
585 | auth = req->auth_req; | ||
405 | 586 | ||
406 | /* We didn't start the pairing, so no requirements */ | 587 | build_pairing_cmd(conn, req, &rsp, auth); |
407 | build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE); | ||
408 | 588 | ||
409 | key_size = min(req->max_key_size, rsp.max_key_size); | 589 | key_size = min(req->max_key_size, rsp.max_key_size); |
410 | if (check_enc_key_size(conn, key_size)) | 590 | if (check_enc_key_size(conn, key_size)) |
411 | return SMP_ENC_KEY_SIZE; | 591 | return SMP_ENC_KEY_SIZE; |
412 | 592 | ||
413 | /* Just works */ | ||
414 | memset(smp->tk, 0, sizeof(smp->tk)); | ||
415 | |||
416 | ret = smp_rand(smp->prnd); | 593 | ret = smp_rand(smp->prnd); |
417 | if (ret) | 594 | if (ret) |
418 | return SMP_UNSPECIFIED; | 595 | return SMP_UNSPECIFIED; |
@@ -422,6 +599,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
422 | 599 | ||
423 | smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); | 600 | smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); |
424 | 601 | ||
602 | /* Request setup of TK */ | ||
603 | ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); | ||
604 | if (ret) | ||
605 | return SMP_UNSPECIFIED; | ||
606 | |||
425 | return 0; | 607 | return 0; |
426 | } | 608 | } |
427 | 609 | ||
@@ -430,11 +612,14 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
430 | struct smp_cmd_pairing *req, *rsp = (void *) skb->data; | 612 | struct smp_cmd_pairing *req, *rsp = (void *) skb->data; |
431 | struct smp_chan *smp = conn->smp_chan; | 613 | struct smp_chan *smp = conn->smp_chan; |
432 | struct hci_dev *hdev = conn->hcon->hdev; | 614 | struct hci_dev *hdev = conn->hcon->hdev; |
433 | u8 key_size; | 615 | u8 key_size, auth = SMP_AUTH_NONE; |
434 | int ret; | 616 | int ret; |
435 | 617 | ||
436 | BT_DBG("conn %p", conn); | 618 | BT_DBG("conn %p", conn); |
437 | 619 | ||
620 | if (!(conn->hcon->link_mode & HCI_LM_MASTER)) | ||
621 | return SMP_CMD_NOTSUPP; | ||
622 | |||
438 | skb_pull(skb, sizeof(*rsp)); | 623 | skb_pull(skb, sizeof(*rsp)); |
439 | 624 | ||
440 | req = (void *) &smp->preq[1]; | 625 | req = (void *) &smp->preq[1]; |
@@ -443,12 +628,6 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
443 | if (check_enc_key_size(conn, key_size)) | 628 | if (check_enc_key_size(conn, key_size)) |
444 | return SMP_ENC_KEY_SIZE; | 629 | return SMP_ENC_KEY_SIZE; |
445 | 630 | ||
446 | if (rsp->oob_flag) | ||
447 | return SMP_OOB_NOT_AVAIL; | ||
448 | |||
449 | /* Just works */ | ||
450 | memset(smp->tk, 0, sizeof(smp->tk)); | ||
451 | |||
452 | ret = smp_rand(smp->prnd); | 631 | ret = smp_rand(smp->prnd); |
453 | if (ret) | 632 | if (ret) |
454 | return SMP_UNSPECIFIED; | 633 | return SMP_UNSPECIFIED; |
@@ -456,6 +635,22 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
456 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; | 635 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; |
457 | memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); | 636 | memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); |
458 | 637 | ||
638 | if ((req->auth_req & SMP_AUTH_BONDING) && | ||
639 | (rsp->auth_req & SMP_AUTH_BONDING)) | ||
640 | auth = SMP_AUTH_BONDING; | ||
641 | |||
642 | auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; | ||
643 | |||
644 | ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); | ||
645 | if (ret) | ||
646 | return SMP_UNSPECIFIED; | ||
647 | |||
648 | set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); | ||
649 | |||
650 | /* Can't compose response until we have been confirmed */ | ||
651 | if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) | ||
652 | return 0; | ||
653 | |||
459 | queue_work(hdev->workqueue, &smp->confirm); | 654 | queue_work(hdev->workqueue, &smp->confirm); |
460 | 655 | ||
461 | return 0; | 656 | return 0; |
@@ -477,8 +672,10 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) | |||
477 | swap128(smp->prnd, random); | 672 | swap128(smp->prnd, random); |
478 | smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), | 673 | smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random), |
479 | random); | 674 | random); |
480 | } else { | 675 | } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) { |
481 | queue_work(hdev->workqueue, &smp->confirm); | 676 | queue_work(hdev->workqueue, &smp->confirm); |
677 | } else { | ||
678 | set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); | ||
482 | } | 679 | } |
483 | 680 | ||
484 | return 0; | 681 | return 0; |
@@ -531,7 +728,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
531 | 728 | ||
532 | BT_DBG("conn %p", conn); | 729 | BT_DBG("conn %p", conn); |
533 | 730 | ||
534 | hcon->pending_sec_level = BT_SECURITY_MEDIUM; | 731 | hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); |
535 | 732 | ||
536 | if (smp_ltk_encrypt(conn)) | 733 | if (smp_ltk_encrypt(conn)) |
537 | return 0; | 734 | return 0; |
@@ -558,6 +755,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) | |||
558 | { | 755 | { |
559 | struct hci_conn *hcon = conn->hcon; | 756 | struct hci_conn *hcon = conn->hcon; |
560 | struct smp_chan *smp = conn->smp_chan; | 757 | struct smp_chan *smp = conn->smp_chan; |
758 | __u8 authreq; | ||
561 | 759 | ||
562 | BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); | 760 | BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); |
563 | 761 | ||
@@ -578,18 +776,22 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) | |||
578 | return 0; | 776 | return 0; |
579 | 777 | ||
580 | smp = smp_chan_create(conn); | 778 | smp = smp_chan_create(conn); |
779 | if (!smp) | ||
780 | return 1; | ||
781 | |||
782 | authreq = seclevel_to_authreq(sec_level); | ||
581 | 783 | ||
582 | if (hcon->link_mode & HCI_LM_MASTER) { | 784 | if (hcon->link_mode & HCI_LM_MASTER) { |
583 | struct smp_cmd_pairing cp; | 785 | struct smp_cmd_pairing cp; |
584 | 786 | ||
585 | build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE); | 787 | build_pairing_cmd(conn, &cp, NULL, authreq); |
586 | smp->preq[0] = SMP_CMD_PAIRING_REQ; | 788 | smp->preq[0] = SMP_CMD_PAIRING_REQ; |
587 | memcpy(&smp->preq[1], &cp, sizeof(cp)); | 789 | memcpy(&smp->preq[1], &cp, sizeof(cp)); |
588 | 790 | ||
589 | smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); | 791 | smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); |
590 | } else { | 792 | } else { |
591 | struct smp_cmd_security_req cp; | 793 | struct smp_cmd_security_req cp; |
592 | cp.auth_req = SMP_AUTH_NONE; | 794 | cp.auth_req = authreq; |
593 | smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); | 795 | smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); |
594 | } | 796 | } |
595 | 797 | ||
@@ -618,7 +820,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) | |||
618 | 820 | ||
619 | skb_pull(skb, sizeof(*rp)); | 821 | skb_pull(skb, sizeof(*rp)); |
620 | 822 | ||
621 | hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size, | 823 | hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, |
622 | rp->ediv, rp->rand, smp->tk); | 824 | rp->ediv, rp->rand, smp->tk); |
623 | 825 | ||
624 | smp_distribute_keys(conn, 1); | 826 | smp_distribute_keys(conn, 1); |
@@ -646,6 +848,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) | |||
646 | break; | 848 | break; |
647 | 849 | ||
648 | case SMP_CMD_PAIRING_FAIL: | 850 | case SMP_CMD_PAIRING_FAIL: |
851 | smp_failure(conn, skb->data[0], 0); | ||
649 | reason = 0; | 852 | reason = 0; |
650 | err = -EPERM; | 853 | err = -EPERM; |
651 | break; | 854 | break; |
@@ -691,8 +894,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) | |||
691 | 894 | ||
692 | done: | 895 | done: |
693 | if (reason) | 896 | if (reason) |
694 | smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), | 897 | smp_failure(conn, reason, 1); |
695 | &reason); | ||
696 | 898 | ||
697 | kfree_skb(skb); | 899 | kfree_skb(skb); |
698 | return err; | 900 | return err; |
@@ -781,7 +983,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) | |||
781 | 983 | ||
782 | if (conn->hcon->out || force) { | 984 | if (conn->hcon->out || force) { |
783 | clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); | 985 | clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); |
784 | del_timer(&conn->security_timer); | 986 | cancel_delayed_work_sync(&conn->security_timer); |
785 | smp_chan_destroy(conn); | 987 | smp_chan_destroy(conn); |
786 | } | 988 | } |
787 | 989 | ||
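
The remaining smp.c hunks make the requested security level drive the SMP authentication requirements in both directions: smp_conn_security() maps BT_SECURITY_* to an auth_req via seclevel_to_authreq(), while an incoming Security Request maps back through authreq_to_seclevel(). A tiny standalone restatement of that round trip (constants redefined locally for the sketch, using the usual BlueZ/kernel values):

#include <stdio.h>
#include <stdint.h>

#define BT_SECURITY_LOW		1
#define BT_SECURITY_MEDIUM	2
#define BT_SECURITY_HIGH	3

#define SMP_AUTH_NONE		0x00
#define SMP_AUTH_BONDING	0x01
#define SMP_AUTH_MITM		0x04

static uint8_t seclevel_to_authreq(uint8_t sec_level)
{
	switch (sec_level) {
	case BT_SECURITY_HIGH:
		return SMP_AUTH_MITM | SMP_AUTH_BONDING;
	case BT_SECURITY_MEDIUM:
		return SMP_AUTH_BONDING;
	default:
		return SMP_AUTH_NONE;
	}
}

static uint8_t authreq_to_seclevel(uint8_t authreq)
{
	/* MITM requested implies high security, otherwise medium */
	return (authreq & SMP_AUTH_MITM) ? BT_SECURITY_HIGH
					 : BT_SECURITY_MEDIUM;
}

int main(void)
{
	uint8_t authreq = seclevel_to_authreq(BT_SECURITY_HIGH);

	printf("HIGH -> authreq 0x%02x -> level %d\n",
	       authreq, authreq_to_seclevel(authreq));
	return 0;
}
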