author    John W. Linville <linville@tuxdriver.com>  2011-11-09 14:54:33 -0500
committer John W. Linville <linville@tuxdriver.com>  2011-11-09 14:54:33 -0500
commit    312fef7d18adda2be822d31916547f84ed6af28b (patch)
tree      095d936b751b0a92946fcf710fb74dea7f8f33f7 /net
parent    5e819059a20b0fc5a71875f28b4cae359e38d85a (diff)
parent    2aeabcbedd51aef94b61d05b57246d1db4984453 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-next

Conflicts:
	net/bluetooth/l2cap_sock.c
	net/bluetooth/mgmt.c
Diffstat (limited to 'net')
-rw-r--r--	net/bluetooth/bnep/core.c	13
-rw-r--r--	net/bluetooth/cmtp/core.c	13
-rw-r--r--	net/bluetooth/hci_conn.c	69
-rw-r--r--	net/bluetooth/hci_core.c	399
-rw-r--r--	net/bluetooth/hci_event.c	221
-rw-r--r--	net/bluetooth/hci_sysfs.c	40
-rw-r--r--	net/bluetooth/hidp/core.c	157
-rw-r--r--	net/bluetooth/l2cap_core.c	947
-rw-r--r--	net/bluetooth/l2cap_sock.c	90
-rw-r--r--	net/bluetooth/mgmt.c	500
-rw-r--r--	net/bluetooth/rfcomm/core.c	65
-rw-r--r--	net/bluetooth/rfcomm/sock.c	2
-rw-r--r--	net/bluetooth/rfcomm/tty.c	45
-rw-r--r--	net/bluetooth/smp.c	3
14 files changed, 1728 insertions, 836 deletions
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961ec..a6cd856046ab 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem);
 static struct bnep_session *__bnep_get_session(u8 *dst)
 {
 	struct bnep_session *s;
-	struct list_head *p;
 
 	BT_DBG("");
 
-	list_for_each(p, &bnep_session_list) {
-		s = list_entry(p, struct bnep_session, list);
+	list_for_each_entry(s, &bnep_session_list, list)
 		if (!compare_ether_addr(dst, s->eh.h_source))
 			return s;
-	}
+
 	return NULL;
 }
 
@@ -667,17 +665,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
 
 int bnep_get_connlist(struct bnep_connlist_req *req)
 {
-	struct list_head *p;
+	struct bnep_session *s;
 	int err = 0, n = 0;
 
 	down_read(&bnep_session_sem);
 
-	list_for_each(p, &bnep_session_list) {
-		struct bnep_session *s;
+	list_for_each_entry(s, &bnep_session_list, list) {
 		struct bnep_conninfo ci;
 
-		s = list_entry(p, struct bnep_session, list);
-
 		__bnep_copy_ci(&ci, s);
 
 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9dc..9e8940b24bba 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list);
 static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 {
 	struct cmtp_session *session;
-	struct list_head *p;
 
 	BT_DBG("");
 
-	list_for_each(p, &cmtp_session_list) {
-		session = list_entry(p, struct cmtp_session, list);
+	list_for_each_entry(session, &cmtp_session_list, list)
 		if (!bacmp(bdaddr, &session->bdaddr))
 			return session;
-	}
+
 	return NULL;
 }
 
@@ -431,19 +429,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
 
 int cmtp_get_connlist(struct cmtp_connlist_req *req)
 {
-	struct list_head *p;
+	struct cmtp_session *session;
 	int err = 0, n = 0;
 
 	BT_DBG("");
 
 	down_read(&cmtp_session_sem);
 
-	list_for_each(p, &cmtp_session_list) {
-		struct cmtp_session *session;
+	list_for_each_entry(session, &cmtp_session_list, list) {
 		struct cmtp_conninfo ci;
 
-		session = list_entry(p, struct cmtp_session, list);
-
 		__cmtp_copy_session(session, &ci);
 
 		if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd92..de0b93e45980 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -374,6 +374,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	skb_queue_head_init(&conn->data_q);
 
+	hci_chan_hash_init(conn);
+
 	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
 	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
@@ -432,6 +434,8 @@ int hci_conn_del(struct hci_conn *conn)
 
 	tasklet_disable(&hdev->tx_task);
 
+	hci_chan_hash_flush(conn);
+
 	hci_conn_hash_del(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -453,16 +457,13 @@ int hci_conn_del(struct hci_conn *conn)
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 {
 	int use_src = bacmp(src, BDADDR_ANY);
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;
 
 	BT_DBG("%s -> %s", batostr(src), batostr(dst));
 
 	read_lock_bh(&hci_dev_list_lock);
 
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
 			continue;
 
@@ -819,7 +820,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
 
 		c->state = BT_CLOSED;
 
-		hci_proto_disconn_cfm(c, 0x16);
+		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
 		hci_conn_del(c);
 	}
 }
@@ -855,10 +856,10 @@ EXPORT_SYMBOL(hci_conn_put_device);
 
 int hci_get_conn_list(void __user *arg)
 {
+	register struct hci_conn *c;
 	struct hci_conn_list_req req, *cl;
 	struct hci_conn_info *ci;
 	struct hci_dev *hdev;
-	struct list_head *p;
 	int n = 0, size, err;
 
 	if (copy_from_user(&req, arg, sizeof(req)))
@@ -882,10 +883,7 @@ int hci_get_conn_list(void __user *arg)
 	ci = cl->conn_info;
 
 	hci_dev_lock_bh(hdev);
-	list_for_each(p, &hdev->conn_hash.list) {
-		register struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
-
+	list_for_each_entry(c, &hdev->conn_hash.list, list) {
 		bacpy(&(ci + n)->bdaddr, &c->dst);
 		(ci + n)->handle = c->handle;
 		(ci + n)->type = c->type;
@@ -956,3 +954,52 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
 
 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
 }
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_chan *chan;
+
+	BT_DBG("%s conn %p", hdev->name, conn);
+
+	chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+	if (!chan)
+		return NULL;
+
+	chan->conn = conn;
+	skb_queue_head_init(&chan->data_q);
+
+	tasklet_disable(&hdev->tx_task);
+	hci_chan_hash_add(conn, chan);
+	tasklet_enable(&hdev->tx_task);
+
+	return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+	tasklet_disable(&hdev->tx_task);
+	hci_chan_hash_del(conn, chan);
+	tasklet_enable(&hdev->tx_task);
+
+	skb_queue_purge(&chan->data_q);
+	kfree(chan);
+
+	return 0;
+}
+
+void hci_chan_hash_flush(struct hci_conn *conn)
+{
+	struct hci_chan_hash *h = &conn->chan_hash;
+	struct hci_chan *chan, *tmp;
+
+	BT_DBG("conn %p", conn);
+
+	list_for_each_entry_safe(chan, tmp, &h->list, list)
+		hci_chan_del(chan);
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae36..fb3feeb185d7 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -319,8 +319,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;
 
 	BT_DBG("%d", index);
 
@@ -328,8 +327,7 @@ struct hci_dev *hci_dev_get(int index)
 		return NULL;
 
 	read_lock(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
 			break;
@@ -551,8 +549,11 @@ int hci_dev_open(__u16 dev)
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->flags))
-			mgmt_powered(hdev->id, 1);
+		if (!test_bit(HCI_SETUP, &hdev->flags)) {
+			hci_dev_lock_bh(hdev);
+			mgmt_powered(hdev, 1);
+			hci_dev_unlock_bh(hdev);
+		}
 	} else {
 		/* Init failed, cleanup */
 		tasklet_kill(&hdev->rx_task);
@@ -597,6 +598,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	tasklet_kill(&hdev->rx_task);
 	tasklet_kill(&hdev->tx_task);
 
+	if (hdev->discov_timeout > 0) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = 0;
+	}
+
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work(&hdev->power_off);
+
 	hci_dev_lock_bh(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
@@ -636,7 +645,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	mgmt_powered(hdev->id, 0);
+	hci_dev_lock_bh(hdev);
+	mgmt_powered(hdev, 0);
+	hci_dev_unlock_bh(hdev);
 
 	/* Clear flags */
 	hdev->flags = 0;
@@ -794,9 +805,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 int hci_get_dev_list(void __user *arg)
 {
+	struct hci_dev *hdev;
 	struct hci_dev_list_req *dl;
 	struct hci_dev_req *dr;
-	struct list_head *p;
 	int n = 0, size, err;
 	__u16 dev_num;
 
@@ -815,12 +826,9 @@ int hci_get_dev_list(void __user *arg)
 	dr = dl->dev_req;
 
 	read_lock_bh(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *hdev;
-
-		hdev = list_entry(p, struct hci_dev, list);
-
-		hci_del_off_timer(hdev);
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+			cancel_delayed_work(&hdev->power_off);
 
 		if (!test_bit(HCI_MGMT, &hdev->flags))
 			set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +863,8 @@ int hci_get_dev_info(void __user *arg)
 	if (!hdev)
 		return -ENODEV;
 
-	hci_del_off_timer(hdev);
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work_sync(&hdev->power_off);
 
 	if (!test_bit(HCI_MGMT, &hdev->flags))
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +921,7 @@ struct hci_dev *hci_alloc_dev(void)
 	if (!hdev)
 		return NULL;
 
+	hci_init_sysfs(hdev);
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
@@ -938,39 +948,41 @@ static void hci_power_on(struct work_struct *work)
 		return;
 
 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-		mod_timer(&hdev->off_timer,
-			jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+		queue_delayed_work(hdev->workqueue, &hdev->power_off,
+					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_added(hdev->id);
+		mgmt_index_added(hdev);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							power_off.work);
 
 	BT_DBG("%s", hdev->name);
 
+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
 	hci_dev_close(hdev->id);
 }
 
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) data;
+	struct hci_dev *hdev;
+	u8 scan = SCAN_PAGE;
+
+	hdev = container_of(work, struct hci_dev, discov_off.work);
 
 	BT_DBG("%s", hdev->name);
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+	hci_dev_lock_bh(hdev);
 
-	queue_work(hdev->workqueue, &hdev->power_off);
-}
+	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
-void hci_del_off_timer(struct hci_dev *hdev)
-{
-	BT_DBG("%s", hdev->name);
+	hdev->discov_timeout = 0;
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
-	del_timer(&hdev->off_timer);
+	hci_dev_unlock_bh(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1019,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
 
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->link_keys) {
-		struct link_key *k;
-
-		k = list_entry(p, struct link_key, list);
+	struct link_key *k;
 
+	list_for_each_entry(k, &hdev->link_keys, list)
 		if (bacmp(bdaddr, &k->bdaddr) == 0)
 			return k;
-	}
 
 	return NULL;
 }
@@ -1138,7 +1145,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
 
-	mgmt_new_key(hdev->id, key, persistent);
+	mgmt_new_link_key(hdev, key, persistent);
 
 	if (!persistent) {
 		list_del(&key->list);
@@ -1181,7 +1188,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
 	memcpy(id->rand, rand, sizeof(id->rand));
 
 	if (new_key)
-		mgmt_new_key(hdev->id, key, old_key_type);
+		mgmt_new_link_key(hdev, key, old_key_type);
 
 	return 0;
 }
@@ -1279,16 +1286,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
 						bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
+	struct bdaddr_list *b;
 
+	list_for_each_entry(b, &hdev->blacklist, list)
 		if (bacmp(bdaddr, &b->bdaddr) == 0)
 			return b;
-	}
 
 	return NULL;
 }
@@ -1327,7 +1329,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
 	list_add(&entry->list, &hdev->blacklist);
 
-	return mgmt_device_blocked(hdev->id, bdaddr);
+	return mgmt_device_blocked(hdev, bdaddr);
 }
 
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -1346,7 +1348,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	list_del(&entry->list);
 	kfree(entry);
 
-	return mgmt_device_unblocked(hdev->id, bdaddr);
+	return mgmt_device_unblocked(hdev, bdaddr);
 }
 
 static void hci_clear_adv_cache(unsigned long arg)
@@ -1425,7 +1427,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
 int hci_register_dev(struct hci_dev *hdev)
 {
 	struct list_head *head = &hci_dev_list, *p;
-	int i, id = 0;
+	int i, id, error;
 
 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
 						hdev->bus, hdev->owner);
@@ -1433,6 +1435,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
 
+	/* Do not allow HCI_AMP devices to register at index 0,
+	 * so the index can be used as the AMP controller ID.
+	 */
+	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
 	write_lock_bh(&hci_dev_list_lock);
 
 	/* Find first available device id */
@@ -1479,6 +1486,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	hci_conn_hash_init(hdev);
 
+	INIT_LIST_HEAD(&hdev->mgmt_pending);
+
 	INIT_LIST_HEAD(&hdev->blacklist);
 
 	INIT_LIST_HEAD(&hdev->uuids);
@@ -1492,8 +1501,9 @@ int hci_register_dev(struct hci_dev *hdev)
 						(unsigned long) hdev);
 
 	INIT_WORK(&hdev->power_on, hci_power_on);
-	INIT_WORK(&hdev->power_off, hci_power_off);
-	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
@@ -1502,10 +1512,14 @@ int hci_register_dev(struct hci_dev *hdev)
 	write_unlock_bh(&hci_dev_list_lock);
 
 	hdev->workqueue = create_singlethread_workqueue(hdev->name);
-	if (!hdev->workqueue)
-		goto nomem;
+	if (!hdev->workqueue) {
+		error = -ENOMEM;
+		goto err;
+	}
 
-	hci_register_sysfs(hdev);
+	error = hci_add_sysfs(hdev);
+	if (error < 0)
+		goto err_wqueue;
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1524,17 +1538,19 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	return id;
 
-nomem:
+err_wqueue:
+	destroy_workqueue(hdev->workqueue);
+err:
 	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock_bh(&hci_dev_list_lock);
 
-	return -ENOMEM;
+	return error;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
 	int i;
 
@@ -1550,8 +1566,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
 		kfree_skb(hdev->reassembly[i]);
 
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
-					!test_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_removed(hdev->id);
+				!test_bit(HCI_SETUP, &hdev->flags)) {
+		hci_dev_lock_bh(hdev);
+		mgmt_index_removed(hdev);
+		hci_dev_unlock_bh(hdev);
+	}
+
+	/* mgmt_index_removed should take care of emptying the
+	 * pending list */
+	BUG_ON(!list_empty(&hdev->mgmt_pending));
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1560,9 +1583,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
 		rfkill_destroy(hdev->rfkill);
 	}
 
-	hci_unregister_sysfs(hdev);
+	hci_del_sysfs(hdev);
 
-	hci_del_off_timer(hdev);
 	del_timer(&hdev->adv_timer);
 
 	destroy_workqueue(hdev->workqueue);
@@ -1576,8 +1598,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
-
-	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1948,23 +1968,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
 	hdr->dlen = cpu_to_le16(len);
 }
 
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+				struct sk_buff *skb, __u16 flags)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;
 
-	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-	skb->dev = (void *) hdev;
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
-
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-		skb_queue_tail(&conn->data_q, skb);
+		skb_queue_tail(queue, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1987,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 		skb_shinfo(skb)->frag_list = NULL;
 
 		/* Queue all fragments atomically */
-		spin_lock_bh(&conn->data_q.lock);
+		spin_lock_bh(&queue->lock);
 
-		__skb_queue_tail(&conn->data_q, skb);
+		__skb_queue_tail(queue, skb);
 
 		flags &= ~ACL_START;
 		flags |= ACL_CONT;
@@ -1987,11 +2002,25 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-			__skb_queue_tail(&conn->data_q, skb);
+			__skb_queue_tail(queue, skb);
 		} while (list);
 
-		spin_unlock_bh(&conn->data_q.lock);
+		spin_unlock_bh(&queue->lock);
 	}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+	skb->dev = (void *) hdev;
+	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	hci_add_acl_hdr(skb, conn->handle, flags);
+
+	hci_queue_acl(conn, &chan->data_q, skb, flags);
 
 	tasklet_schedule(&hdev->tx_task);
 }
@@ -2026,16 +2055,12 @@ EXPORT_SYMBOL(hci_send_sco);
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn = NULL, *c;
 	int num = 0, min = ~0;
-	struct list_head *p;
 
 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
-	list_for_each(p, &h->list) {
-		struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
-
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;
 
@@ -2084,14 +2109,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
 	struct hci_conn *c;
 
 	BT_ERR("%s link tx timeout", hdev->name);
 
 	/* Kill stalled connections */
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 					hdev->name, batostr(&c->dst));
@@ -2100,11 +2123,137 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 	}
 }
 
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+						int *quote)
 {
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_chan *chan = NULL;
+	int num = 0, min = ~0, cur_prio = 0;
 	struct hci_conn *conn;
+	int cnt, q, conn_num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *tmp;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		conn_num++;
+
+		ch = &conn->chan_hash;
+
+		list_for_each_entry(tmp, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (skb_queue_empty(&tmp->data_q))
+				continue;
+
+			skb = skb_peek(&tmp->data_q);
+			if (skb->priority < cur_prio)
+				continue;
+
+			if (skb->priority > cur_prio) {
+				num = 0;
+				min = ~0;
+				cur_prio = skb->priority;
+			}
+
+			num++;
+
+			if (conn->sent < min) {
+				min = conn->sent;
+				chan = tmp;
+			}
+		}
+
+		if (hci_conn_num(hdev, type) == conn_num)
+			break;
+	}
+
+	if (!chan)
+		return NULL;
+
+	switch (chan->conn->type) {
+	case ACL_LINK:
+		cnt = hdev->acl_cnt;
+		break;
+	case SCO_LINK:
+	case ESCO_LINK:
+		cnt = hdev->sco_cnt;
+		break;
+	case LE_LINK:
+		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+		break;
+	default:
+		cnt = 0;
+		BT_ERR("Unknown link type");
+	}
+
+	q = cnt / num;
+	*quote = q ? q : 1;
+	BT_DBG("chan %p quote %d", chan, *quote);
+	return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn;
+	int num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *chan;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		ch = &conn->chan_hash;
+		list_for_each_entry(chan, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (chan->sent) {
+				chan->sent = 0;
+				continue;
+			}
+
+			if (skb_queue_empty(&chan->data_q))
+				continue;
+
+			skb = skb_peek(&chan->data_q);
+			if (skb->priority >= HCI_PRIO_MAX - 1)
+				continue;
+
+			skb->priority = HCI_PRIO_MAX - 1;
+
+			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+								skb->priority);
+		}
+
+		if (hci_conn_num(hdev, type) == num)
+			break;
+	}
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
+	unsigned int cnt;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2118,19 +2267,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 		hci_link_tx_to(hdev, ACL_LINK);
 	}
 
-	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	cnt = hdev->acl_cnt;
+
+	while (hdev->acl_cnt &&
+			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
 
-			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
+
+			hci_conn_enter_active_mode(chan->conn,
+						bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
 
 			hdev->acl_cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
+	if (cnt != hdev->acl_cnt)
+		hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 /* Schedule SCO */
@@ -2182,9 +2347,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-	struct hci_conn *conn;
+	struct hci_chan *chan;
 	struct sk_buff *skb;
-	int quote, cnt;
+	int quote, cnt, tmp;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2200,21 +2365,35 @@ static inline void hci_sched_le(struct hci_dev *hdev)
 	}
 
 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	tmp = cnt;
+	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
+
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
 
 			hci_send_frame(skb);
 			hdev->le_last_tx = jiffies;
 
 			cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
 	if (hdev->le_pkts)
 		hdev->le_cnt = cnt;
 	else
 		hdev->acl_cnt = cnt;
+
+	if (cnt != tmp)
+		hci_prio_recalculate(hdev, LE_LINK);
 }
 
 static void hci_tx_task(unsigned long arg)
@@ -2407,3 +2586,31 @@ static void hci_cmd_task(unsigned long arg)
 		}
 	}
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+	/* General inquiry access code (GIAC) */
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+
+	BT_DBG("%s", hdev->name);
+
+	if (test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EINPROGRESS;
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, sizeof(cp.lap));
+	cp.length = length;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EPERM;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0d..a89cf1f24e47 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,9 +58,11 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	if (status)
 		return;
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
+	clear_bit(HCI_INQUIRY, &hdev->flags);
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 0);
+	hci_dev_unlock(hdev);
 
 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
 
@@ -76,10 +78,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
 	if (status)
 		return;
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
-
 	hci_conn_check_pending(hdev);
 }
 
@@ -205,13 +203,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!sent)
 		return;
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_set_local_name_complete(hdev->id, sent, status);
+		mgmt_set_local_name_complete(hdev, sent, status);
 
-	if (status)
-		return;
+	if (status == 0)
+		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 
-	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +274,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	__u8 param, status = *((__u8 *) skb->data);
+	int old_pscan, old_iscan;
 	void *sent;
 
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +284,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!sent)
 		return;
 
-	if (!status) {
-		__u8 param = *((__u8 *) sent);
-		int old_pscan, old_iscan;
-
-		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
-		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+	param = *((__u8 *) sent);
 
-		if (param & SCAN_INQUIRY) {
-			set_bit(HCI_ISCAN, &hdev->flags);
-			if (!old_iscan)
-				mgmt_discoverable(hdev->id, 1);
-		} else if (old_iscan)
-			mgmt_discoverable(hdev->id, 0);
+	hci_dev_lock(hdev);
 
-		if (param & SCAN_PAGE) {
-			set_bit(HCI_PSCAN, &hdev->flags);
-			if (!old_pscan)
-				mgmt_connectable(hdev->id, 1);
-		} else if (old_pscan)
-			mgmt_connectable(hdev->id, 0);
+	if (status != 0) {
+		mgmt_write_scan_failed(hdev, param, status);
+		hdev->discov_timeout = 0;
+		goto done;
 	}
 
+	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+	if (param & SCAN_INQUIRY) {
+		set_bit(HCI_ISCAN, &hdev->flags);
+		if (!old_iscan)
+			mgmt_discoverable(hdev, 1);
+		if (hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+									to);
+		}
+	} else if (old_iscan)
+		mgmt_discoverable(hdev, 0);
+
+	if (param & SCAN_PAGE) {
+		set_bit(HCI_PSCAN, &hdev->flags);
+		if (!old_pscan)
+			mgmt_connectable(hdev, 1);
+	} else if (old_pscan)
+		mgmt_connectable(hdev, 0);
+
+done:
+	hci_dev_unlock(hdev);
 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
 }
 
@@ -748,6 +761,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
 }
 
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->amp_status = rp->amp_status;
+	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+	hdev->amp_type = rp->amp_type;
+	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
 					struct sk_buff *skb)
 {
@@ -804,19 +841,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
 	if (rp->status != 0)
-		return;
+		goto unlock;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
 	if (!cp)
-		return;
+		goto unlock;
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
 	if (conn)
 		conn->pin_length = cp->pin_len;
+
+unlock:
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +867,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
 }
+
 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 					struct sk_buff *skb)
 {
@@ -855,9 +902,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +918,13 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+	hci_dev_lock(hdev);
+
 	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
 								rp->status);
+
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +934,10 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
 
 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
-	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+	hci_dev_lock(hdev);
+	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
 						rp->randomizer, rp->status);
+	hci_dev_unlock(hdev);
 }
 
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -955,12 +1012,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 	if (status) {
 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 		hci_conn_check_pending(hdev);
+		hci_dev_lock(hdev);
+		if (test_bit(HCI_MGMT, &hdev->flags))
+			mgmt_inquiry_failed(hdev, status);
+		hci_dev_unlock(hdev);
 		return;
 	}
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 1);
+	set_bit(HCI_INQUIRY, &hdev->flags);
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 1);
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1402,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	BT_DBG("%s status %d", hdev->name, status);
 
-	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-				test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_discovering(hdev->id, 0);
-
 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 
 	hci_conn_check_pending(hdev);
+
+	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+		return;
+
+	hci_dev_lock(hdev);
+	mgmt_discovering(hdev, 0);
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1427,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 
 	hci_dev_lock(hdev);
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	for (; num_rsp; num_rsp--, info++) {
 		bacpy(&data.bdaddr, &info->bdaddr);
 		data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1377,8 +1437,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 		data.rssi = 0x00;
 		data.ssp_mode = 0x00;
 		hci_inquiry_cache_update(hdev, &data);
-		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
-									NULL);
+		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+					info->dev_class, 0, NULL);
 	}
 
 	hci_dev_unlock(hdev);
@@ -1412,7 +1472,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			conn->state = BT_CONFIG;
 			hci_conn_hold(conn);
 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-			mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+			mgmt_connected(hdev, &ev->bdaddr, conn->type);
 		} else
 			conn->state = BT_CONNECTED;
 
@@ -1444,7 +1504,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	} else {
 		conn->state = BT_CLOSED;
 		if (conn->type == ACL_LINK)
-			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+								ev->status);
 	}
 
 	if (conn->type == ACL_LINK)
@@ -1531,7 +1592,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 		struct hci_cp_reject_conn_req cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
-		cp.reason = 0x0f;
+		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
 	}
 }
@@ -1544,7 +1605,9 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	BT_DBG("%s status %d", hdev->name, ev->status);
 
 	if (ev->status) {
-		mgmt_disconnect_failed(hdev->id);
+		hci_dev_lock(hdev);
+		mgmt_disconnect_failed(hdev);
+		hci_dev_unlock(hdev);
 		return;
 	}
 
@@ -1557,7 +1620,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	conn->state = BT_CLOSED;
 
 	if (conn->type == ACL_LINK || conn->type == LE_LINK)
-		mgmt_disconnected(hdev->id, &conn->dst);
+		mgmt_disconnected(hdev, &conn->dst, conn->type);
 
 	hci_proto_disconn_cfm(conn, ev->reason);
 	hci_conn_del(conn);
@@ -1588,7 +1651,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			conn->sec_level = conn->pending_sec_level;
 		}
 	} else {
-		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+		mgmt_auth_failed(hdev, &conn->dst, ev->status);
 	}
 
 	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1706,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
 	hci_dev_lock(hdev);
 
 	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+		mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 	if (!conn)
@@ -1898,6 +1961,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_write_ca_timeout(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_AMP_INFO:
+		hci_cc_read_local_amp_info(hdev, skb);
+		break;
+
 	case HCI_OP_DELETE_STORED_LINK_KEY:
 		hci_cc_delete_stored_link_key(hdev, skb);
 		break;
@@ -2029,7 +2096,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
 	case HCI_OP_DISCONNECT:
 		if (ev->status != 0)
-			mgmt_disconnect_failed(hdev->id);
+			mgmt_disconnect_failed(hdev);
 		break;
 
 	case HCI_OP_LE_CREATE_CONN:
@@ -2194,7 +2261,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
 		else
 			secure = 0;
 
-		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
 	}
 
 unlock:
@@ -2363,12 +2430,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 
 	hci_dev_lock(hdev);
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
 		struct inquiry_info_with_rssi_and_pscan_mode *info;
 		info = (void *) (skb->data + 1);
@@ -2383,7 +2444,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 			data.rssi = info->rssi;
 			data.ssp_mode = 0x00;
 			hci_inquiry_cache_update(hdev, &data);
-			mgmt_device_found(hdev->id, &info->bdaddr,
+			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
 						info->dev_class, info->rssi,
 						NULL);
 		}
@@ -2400,7 +2461,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 			data.rssi = info->rssi;
 			data.ssp_mode = 0x00;
 			hci_inquiry_cache_update(hdev, &data);
-			mgmt_device_found(hdev->id, &info->bdaddr,
+			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
 						info->dev_class, info->rssi,
 						NULL);
 		}
@@ -2531,12 +2592,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 	if (!num_rsp)
 		return;
 
-	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-		if (test_bit(HCI_MGMT, &hdev->flags))
-			mgmt_discovering(hdev->id, 1);
-	}
-
 	hci_dev_lock(hdev);
 
 	for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2604,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 		data.rssi = info->rssi;
 		data.ssp_mode = 0x01;
 		hci_inquiry_cache_update(hdev, &data);
-		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
-						info->rssi, info->data);
+		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+				info->dev_class, info->rssi, info->data);
 	}
 
 	hci_dev_unlock(hdev);
@@ -2614,7 +2669,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 		struct hci_cp_io_capability_neg_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
-		cp.reason = 0x18; /* Pairing not allowed */
+		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
 							sizeof(cp), &cp);
@@ -2706,7 +2761,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
 	}
 
 confirm:
-	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
 								confirm_hint);
 
 unlock:
@@ -2732,7 +2787,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
2732 * event gets always produced as initiator and is also mapped to 2787 * event gets always produced as initiator and is also mapped to
2733 * the mgmt_auth_failed event */ 2788 * the mgmt_auth_failed event */
2734 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) 2789 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2735 mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 2790 mgmt_auth_failed(hdev, &conn->dst, ev->status);
2736 2791
2737 hci_conn_put(conn); 2792 hci_conn_put(conn);
2738 2793
@@ -2813,14 +2868,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2813 } 2868 }
2814 2869
2815 if (ev->status) { 2870 if (ev->status) {
2816 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); 2871 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, ev->status);
2817 hci_proto_connect_cfm(conn, ev->status); 2872 hci_proto_connect_cfm(conn, ev->status);
2818 conn->state = BT_CLOSED; 2873 conn->state = BT_CLOSED;
2819 hci_conn_del(conn); 2874 hci_conn_del(conn);
2820 goto unlock; 2875 goto unlock;
2821 } 2876 }
2822 2877
2823 mgmt_connected(hdev->id, &ev->bdaddr, conn->type); 2878 mgmt_connected(hdev, &ev->bdaddr, conn->type);
2824 2879
2825 conn->sec_level = BT_SECURITY_LOW; 2880 conn->sec_level = BT_SECURITY_LOW;
2826 conn->handle = __le16_to_cpu(ev->handle); 2881 conn->handle = __le16_to_cpu(ev->handle);
@@ -3104,5 +3159,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3104 kfree_skb(skb); 3159 kfree_skb(skb);
3105} 3160}
3106 3161
3107module_param(enable_le, bool, 0444); 3162module_param(enable_le, bool, 0644);
3108MODULE_PARM_DESC(enable_le, "Enable LE support"); 3163MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 22f1a6c87035..f8e6aa386cef 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -435,17 +435,12 @@ static const struct file_operations inquiry_cache_fops = {
435static int blacklist_show(struct seq_file *f, void *p) 435static int blacklist_show(struct seq_file *f, void *p)
436{ 436{
437 struct hci_dev *hdev = f->private; 437 struct hci_dev *hdev = f->private;
438 struct list_head *l; 438 struct bdaddr_list *b;
439 439
440 hci_dev_lock_bh(hdev); 440 hci_dev_lock_bh(hdev);
441 441
442 list_for_each(l, &hdev->blacklist) { 442 list_for_each_entry(b, &hdev->blacklist, list)
443 struct bdaddr_list *b;
444
445 b = list_entry(l, struct bdaddr_list, list);
446
447 seq_printf(f, "%s\n", batostr(&b->bdaddr)); 443 seq_printf(f, "%s\n", batostr(&b->bdaddr));
448 }
449 444
450 hci_dev_unlock_bh(hdev); 445 hci_dev_unlock_bh(hdev);
451 446
@@ -484,17 +479,12 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
484static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
485{ 480{
486 struct hci_dev *hdev = f->private; 481 struct hci_dev *hdev = f->private;
487 struct list_head *l; 482 struct bt_uuid *uuid;
488 483
489 hci_dev_lock_bh(hdev); 484 hci_dev_lock_bh(hdev);
490 485
491 list_for_each(l, &hdev->uuids) { 486 list_for_each_entry(uuid, &hdev->uuids, list)
492 struct bt_uuid *uuid;
493
494 uuid = list_entry(l, struct bt_uuid, list);
495
496 print_bt_uuid(f, uuid->uuid); 487 print_bt_uuid(f, uuid->uuid);
497 }
498 488
499 hci_dev_unlock_bh(hdev); 489 hci_dev_unlock_bh(hdev);
500 490
@@ -542,22 +532,28 @@ static int auto_accept_delay_get(void *data, u64 *val)
542DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 532DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
543 auto_accept_delay_set, "%llu\n"); 533 auto_accept_delay_set, "%llu\n");
544 534
545int hci_register_sysfs(struct hci_dev *hdev) 535void hci_init_sysfs(struct hci_dev *hdev)
536{
537 struct device *dev = &hdev->dev;
538
539 dev->type = &bt_host;
540 dev->class = bt_class;
541
542 dev_set_drvdata(dev, hdev);
543 device_initialize(dev);
544}
545
546int hci_add_sysfs(struct hci_dev *hdev)
546{ 547{
547 struct device *dev = &hdev->dev; 548 struct device *dev = &hdev->dev;
548 int err; 549 int err;
549 550
550 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 551 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
551 552
552 dev->type = &bt_host;
553 dev->class = bt_class;
554 dev->parent = hdev->parent; 553 dev->parent = hdev->parent;
555
556 dev_set_name(dev, "%s", hdev->name); 554 dev_set_name(dev, "%s", hdev->name);
557 555
558 dev_set_drvdata(dev, hdev); 556 err = device_add(dev);
559
560 err = device_register(dev);
561 if (err < 0) 557 if (err < 0)
562 return err; 558 return err;
563 559
@@ -581,7 +577,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
581 return 0; 577 return 0;
582} 578}
583 579
584void hci_unregister_sysfs(struct hci_dev *hdev) 580void hci_del_sysfs(struct hci_dev *hdev)
585{ 581{
586 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 582 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
587 583
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 075a3e920caf..3c2d888925d7 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
81static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) 81static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
82{ 82{
83 struct hidp_session *session; 83 struct hidp_session *session;
84 struct list_head *p;
85 84
86 BT_DBG(""); 85 BT_DBG("");
87 86
88 list_for_each(p, &hidp_session_list) { 87 list_for_each_entry(session, &hidp_session_list, list) {
89 session = list_entry(p, struct hidp_session, list);
90 if (!bacmp(bdaddr, &session->bdaddr)) 88 if (!bacmp(bdaddr, &session->bdaddr))
91 return session; 89 return session;
92 } 90 }
91
93 return NULL; 92 return NULL;
94} 93}
95 94
96static void __hidp_link_session(struct hidp_session *session) 95static void __hidp_link_session(struct hidp_session *session)
97{ 96{
98 __module_get(THIS_MODULE);
99 list_add(&session->list, &hidp_session_list); 97 list_add(&session->list, &hidp_session_list);
100
101 hci_conn_hold_device(session->conn);
102} 98}
103 99
104static void __hidp_unlink_session(struct hidp_session *session) 100static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session)
106 hci_conn_put_device(session->conn); 102 hci_conn_put_device(session->conn);
107 103
108 list_del(&session->list); 104 list_del(&session->list);
109 module_put(THIS_MODULE);
110} 105}
111 106
112static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) 107static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
255 250
256 BT_DBG("session %p data %p size %d", session, data, size); 251 BT_DBG("session %p data %p size %d", session, data, size);
257 252
253 if (atomic_read(&session->terminate))
254 return -EIO;
255
258 skb = alloc_skb(size + 1, GFP_ATOMIC); 256 skb = alloc_skb(size + 1, GFP_ATOMIC);
259 if (!skb) { 257 if (!skb) {
260 BT_ERR("Can't allocate memory for new frame"); 258 BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid,
329 struct sk_buff *skb; 327 struct sk_buff *skb;
330 size_t len; 328 size_t len;
331 int numbered_reports = hid->report_enum[report_type].numbered; 329 int numbered_reports = hid->report_enum[report_type].numbered;
330 int ret;
332 331
333 switch (report_type) { 332 switch (report_type) {
334 case HID_FEATURE_REPORT: 333 case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
352 session->waiting_report_number = numbered_reports ? report_number : -1; 351 session->waiting_report_number = numbered_reports ? report_number : -1;
353 set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); 352 set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
354 data[0] = report_number; 353 data[0] = report_number;
355 if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1)) 354 ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
356 goto err_eio; 355 if (ret)
356 goto err;
357 357
358 /* Wait for the return of the report. The returned report 358 /* Wait for the return of the report. The returned report
359 gets put in session->report_return. */ 359 gets put in session->report_return. */
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid,
365 5*HZ); 365 5*HZ);
366 if (res == 0) { 366 if (res == 0) {
367 /* timeout */ 367 /* timeout */
368 goto err_eio; 368 ret = -EIO;
369 goto err;
369 } 370 }
370 if (res < 0) { 371 if (res < 0) {
371 /* signal */ 372 /* signal */
372 goto err_restartsys; 373 ret = -ERESTARTSYS;
374 goto err;
373 } 375 }
374 } 376 }
375 377
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid,
390 392
391 return len; 393 return len;
392 394
393err_restartsys: 395err:
394 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
395 mutex_unlock(&session->report_mutex);
396 return -ERESTARTSYS;
397err_eio:
398 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); 396 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
399 mutex_unlock(&session->report_mutex); 397 mutex_unlock(&session->report_mutex);
400 return -EIO; 398 return ret;
401} 399}
402 400
403static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, 401static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
422 420
423 /* Set up our wait, and send the report request to the device. */ 421 /* Set up our wait, and send the report request to the device. */
424 set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); 422 set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
425 if (hidp_send_ctrl_message(hid->driver_data, report_type, 423 ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
426 data, count)) { 424 count);
427 ret = -ENOMEM; 425 if (ret)
428 goto err; 426 goto err;
429 }
430 427
431 /* Wait for the ACK from the device. */ 428 /* Wait for the ACK from the device. */
432 while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { 429 while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session,
496 case HIDP_HSHK_ERR_INVALID_REPORT_ID: 493 case HIDP_HSHK_ERR_INVALID_REPORT_ID:
497 case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: 494 case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
498 case HIDP_HSHK_ERR_INVALID_PARAMETER: 495 case HIDP_HSHK_ERR_INVALID_PARAMETER:
499 if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) { 496 if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
500 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
501 wake_up_interruptible(&session->report_queue); 497 wake_up_interruptible(&session->report_queue);
502 } 498
503 /* FIXME: Call into SET_ GET_ handlers here */ 499 /* FIXME: Call into SET_ GET_ handlers here */
504 break; 500 break;
505 501
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session,
520 } 516 }
521 517
522 /* Wake up the waiting thread. */ 518 /* Wake up the waiting thread. */
523 if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { 519 if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
524 clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
525 wake_up_interruptible(&session->report_queue); 520 wake_up_interruptible(&session->report_queue);
526 }
527} 521}
528 522
529static void hidp_process_hid_control(struct hidp_session *session, 523static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
663 return kernel_sendmsg(sock, &msg, &iv, 1, len); 657 return kernel_sendmsg(sock, &msg, &iv, 1, len);
664} 658}
665 659
666static void hidp_process_transmit(struct hidp_session *session) 660static void hidp_process_intr_transmit(struct hidp_session *session)
667{ 661{
668 struct sk_buff *skb; 662 struct sk_buff *skb;
669 663
670 BT_DBG("session %p", session); 664 BT_DBG("session %p", session);
671 665
672 while ((skb = skb_dequeue(&session->ctrl_transmit))) { 666 while ((skb = skb_dequeue(&session->intr_transmit))) {
673 if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { 667 if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
674 skb_queue_head(&session->ctrl_transmit, skb); 668 skb_queue_head(&session->intr_transmit, skb);
675 break; 669 break;
676 } 670 }
677 671
678 hidp_set_timer(session); 672 hidp_set_timer(session);
679 kfree_skb(skb); 673 kfree_skb(skb);
680 } 674 }
675}
681 676
682 while ((skb = skb_dequeue(&session->intr_transmit))) { 677static void hidp_process_ctrl_transmit(struct hidp_session *session)
683 if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { 678{
684 skb_queue_head(&session->intr_transmit, skb); 679 struct sk_buff *skb;
680
681 BT_DBG("session %p", session);
682
683 while ((skb = skb_dequeue(&session->ctrl_transmit))) {
684 if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
685 skb_queue_head(&session->ctrl_transmit, skb);
685 break; 686 break;
686 } 687 }
687 688
@@ -700,6 +701,7 @@ static int hidp_session(void *arg)
700 701
701 BT_DBG("session %p", session); 702 BT_DBG("session %p", session);
702 703
704 __module_get(THIS_MODULE);
703 set_user_nice(current, -15); 705 set_user_nice(current, -15);
704 706
705 init_waitqueue_entry(&ctrl_wait, current); 707 init_waitqueue_entry(&ctrl_wait, current);
@@ -714,23 +716,25 @@ static int hidp_session(void *arg)
714 intr_sk->sk_state != BT_CONNECTED) 716 intr_sk->sk_state != BT_CONNECTED)
715 break; 717 break;
716 718
717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 719 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
718 skb_orphan(skb); 720 skb_orphan(skb);
719 if (!skb_linearize(skb)) 721 if (!skb_linearize(skb))
720 hidp_recv_ctrl_frame(session, skb); 722 hidp_recv_intr_frame(session, skb);
721 else 723 else
722 kfree_skb(skb); 724 kfree_skb(skb);
723 } 725 }
724 726
725 while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { 727 hidp_process_intr_transmit(session);
728
729 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
726 skb_orphan(skb); 730 skb_orphan(skb);
727 if (!skb_linearize(skb)) 731 if (!skb_linearize(skb))
728 hidp_recv_intr_frame(session, skb); 732 hidp_recv_ctrl_frame(session, skb);
729 else 733 else
730 kfree_skb(skb); 734 kfree_skb(skb);
731 } 735 }
732 736
733 hidp_process_transmit(session); 737 hidp_process_ctrl_transmit(session);
734 738
735 schedule(); 739 schedule();
736 set_current_state(TASK_INTERRUPTIBLE); 740 set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@ static int hidp_session(void *arg)
739 remove_wait_queue(sk_sleep(intr_sk), &intr_wait); 743 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
740 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); 744 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
741 745
746 clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
747 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
748 wake_up_interruptible(&session->report_queue);
749
742 down_write(&hidp_session_sem); 750 down_write(&hidp_session_sem);
743 751
744 hidp_del_timer(session); 752 hidp_del_timer(session);
@@ -772,34 +780,37 @@ static int hidp_session(void *arg)
772 780
773 kfree(session->rd_data); 781 kfree(session->rd_data);
774 kfree(session); 782 kfree(session);
783 module_put_and_exit(0);
775 return 0; 784 return 0;
776} 785}
777 786
778static struct device *hidp_get_device(struct hidp_session *session) 787static struct hci_conn *hidp_get_connection(struct hidp_session *session)
779{ 788{
780 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; 789 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
781 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; 790 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
782 struct device *device = NULL; 791 struct hci_conn *conn;
783 struct hci_dev *hdev; 792 struct hci_dev *hdev;
784 793
785 hdev = hci_get_route(dst, src); 794 hdev = hci_get_route(dst, src);
786 if (!hdev) 795 if (!hdev)
787 return NULL; 796 return NULL;
788 797
789 session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 798 hci_dev_lock_bh(hdev);
790 if (session->conn) 799 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
791 device = &session->conn->dev; 800 if (conn)
801 hci_conn_hold_device(conn);
802 hci_dev_unlock_bh(hdev);
792 803
793 hci_dev_put(hdev); 804 hci_dev_put(hdev);
794 805
795 return device; 806 return conn;
796} 807}
797 808
798static int hidp_setup_input(struct hidp_session *session, 809static int hidp_setup_input(struct hidp_session *session,
799 struct hidp_connadd_req *req) 810 struct hidp_connadd_req *req)
800{ 811{
801 struct input_dev *input; 812 struct input_dev *input;
802 int err, i; 813 int i;
803 814
804 input = input_allocate_device(); 815 input = input_allocate_device();
805 if (!input) 816 if (!input)
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session,
842 input->relbit[0] |= BIT_MASK(REL_WHEEL); 853 input->relbit[0] |= BIT_MASK(REL_WHEEL);
843 } 854 }
844 855
845 input->dev.parent = hidp_get_device(session); 856 input->dev.parent = &session->conn->dev;
846 857
847 input->event = hidp_input_event; 858 input->event = hidp_input_event;
848 859
849 err = input_register_device(input);
850 if (err < 0) {
851 input_free_device(input);
852 session->input = NULL;
853 return err;
854 }
855
856 return 0; 860 return 0;
857} 861}
858 862
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session,
949 strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); 953 strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
950 strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); 954 strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
951 955
952 hid->dev.parent = hidp_get_device(session); 956 hid->dev.parent = &session->conn->dev;
953 hid->ll_driver = &hidp_hid_driver; 957 hid->ll_driver = &hidp_hid_driver;
954 958
955 hid->hid_get_raw_report = hidp_get_raw_report; 959 hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
976 bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) 980 bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
977 return -ENOTUNIQ; 981 return -ENOTUNIQ;
978 982
979 session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
980 if (!session)
981 return -ENOMEM;
982
983 BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); 983 BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
984 984
985 down_write(&hidp_session_sem); 985 down_write(&hidp_session_sem);
986 986
987 s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); 987 s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
988 if (s && s->state == BT_CONNECTED) { 988 if (s && s->state == BT_CONNECTED) {
989 err = -EEXIST; 989 up_write(&hidp_session_sem);
990 goto failed; 990 return -EEXIST;
991 }
992
993 session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
994 if (!session) {
995 up_write(&hidp_session_sem);
996 return -ENOMEM;
991 } 997 }
992 998
993 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); 999 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1003 session->intr_sock = intr_sock; 1009 session->intr_sock = intr_sock;
1004 session->state = BT_CONNECTED; 1010 session->state = BT_CONNECTED;
1005 1011
1012 session->conn = hidp_get_connection(session);
1013 if (!session->conn) {
1014 err = -ENOTCONN;
1015 goto failed;
1016 }
1017
1006 setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); 1018 setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
1007 1019
1008 skb_queue_head_init(&session->ctrl_transmit); 1020 skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1015 session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); 1027 session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
1016 session->idle_to = req->idle_to; 1028 session->idle_to = req->idle_to;
1017 1029
1030 __hidp_link_session(session);
1031
1018 if (req->rd_size > 0) { 1032 if (req->rd_size > 0) {
1019 err = hidp_setup_hid(session, req); 1033 err = hidp_setup_hid(session, req);
1020 if (err && err != -ENODEV) 1034 if (err)
1021 goto purge; 1035 goto purge;
1022 } 1036 }
1023 1037
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1027 goto purge; 1041 goto purge;
1028 } 1042 }
1029 1043
1030 __hidp_link_session(session);
1031
1032 hidp_set_timer(session); 1044 hidp_set_timer(session);
1033 1045
1034 if (session->hid) { 1046 if (session->hid) {
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1054 !session->waiting_for_startup); 1066 !session->waiting_for_startup);
1055 } 1067 }
1056 1068
1057 err = hid_add_device(session->hid); 1069 if (session->hid)
1070 err = hid_add_device(session->hid);
1071 else
1072 err = input_register_device(session->input);
1073
1058 if (err < 0) { 1074 if (err < 0) {
1059 atomic_inc(&session->terminate); 1075 atomic_inc(&session->terminate);
1060 wake_up_process(session->task); 1076 wake_up_process(session->task);
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1077unlink: 1093unlink:
1078 hidp_del_timer(session); 1094 hidp_del_timer(session);
1079 1095
1080 __hidp_unlink_session(session);
1081
1082 if (session->input) { 1096 if (session->input) {
1083 input_unregister_device(session->input); 1097 input_unregister_device(session->input);
1084 session->input = NULL; 1098 session->input = NULL;
@@ -1093,6 +1107,8 @@ unlink:
1093 session->rd_data = NULL; 1107 session->rd_data = NULL;
1094 1108
1095purge: 1109purge:
1110 __hidp_unlink_session(session);
1111
1096 skb_queue_purge(&session->ctrl_transmit); 1112 skb_queue_purge(&session->ctrl_transmit);
1097 skb_queue_purge(&session->intr_transmit); 1113 skb_queue_purge(&session->intr_transmit);
1098 1114
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req)
1134 1150
1135int hidp_get_connlist(struct hidp_connlist_req *req) 1151int hidp_get_connlist(struct hidp_connlist_req *req)
1136{ 1152{
1137 struct list_head *p; 1153 struct hidp_session *session;
1138 int err = 0, n = 0; 1154 int err = 0, n = 0;
1139 1155
1140 BT_DBG(""); 1156 BT_DBG("");
1141 1157
1142 down_read(&hidp_session_sem); 1158 down_read(&hidp_session_sem);
1143 1159
1144 list_for_each(p, &hidp_session_list) { 1160 list_for_each_entry(session, &hidp_session_list, list) {
1145 struct hidp_session *session;
1146 struct hidp_conninfo ci; 1161 struct hidp_conninfo ci;
1147 1162
1148 session = list_entry(p, struct hidp_session, list);
1149
1150 __hidp_copy_session(session, &ci); 1163 __hidp_copy_session(session, &ci);
1151 1164
1152 if (copy_to_user(req->ci, &ci, sizeof(ci))) { 1165 if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf2..e8a6837996cf 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -57,9 +57,10 @@
57#include <net/bluetooth/smp.h> 57#include <net/bluetooth/smp.h>
58 58
59int disable_ertm; 59int disable_ertm;
60int enable_hs;
60 61
61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62static u8 l2cap_fixed_chan[8] = { 0x02, }; 63static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
63 64
64static LIST_HEAD(chan_list); 65static LIST_HEAD(chan_list);
65static DEFINE_RWLOCK(chan_list_lock); 66static DEFINE_RWLOCK(chan_list_lock);
@@ -219,7 +220,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
219 220
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) 221static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{ 222{
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout); 223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
223 224
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) 225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan); 226 chan_hold(chan);
@@ -293,6 +294,8 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
293 294
294 atomic_set(&chan->refcnt, 1); 295 atomic_set(&chan->refcnt, 1);
295 296
297 BT_DBG("sk %p chan %p", sk, chan);
298
296 return chan; 299 return chan;
297} 300}
298 301
@@ -310,7 +313,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid); 314 chan->psm, chan->dcid);
312 315
313 conn->disc_reason = 0x13; 316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
314 317
315 chan->conn = conn; 318 chan->conn = conn;
316 319
@@ -337,6 +340,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
337 chan->omtu = L2CAP_DEFAULT_MTU; 340 chan->omtu = L2CAP_DEFAULT_MTU;
338 } 341 }
339 342
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
349
340 chan_hold(chan); 350 chan_hold(chan);
341 351
342 list_add(&chan->list, &conn->chan_l); 352 list_add(&chan->list, &conn->chan_l);
@@ -556,34 +566,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
556 flags = ACL_START; 566 flags = ACL_START;
557 567
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; 568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
570
571 hci_send_acl(conn->hchan, skb, flags);
572}
573
574static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
575{
576 struct hci_conn *hcon = chan->conn->hcon;
577 u16 flags;
578
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
580 skb->priority);
581
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
585 else
586 flags = ACL_START;
559 587
560 hci_send_acl(conn->hcon, skb, flags); 588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
561} 590}
562 591
563static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) 592static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
564{ 593{
565 struct sk_buff *skb; 594 struct sk_buff *skb;
566 struct l2cap_hdr *lh; 595 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn; 596 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2; 597 int count, hlen;
569 u8 flags;
570 598
571 if (chan->state != BT_CONNECTED) 599 if (chan->state != BT_CONNECTED)
572 return; 600 return;
573 601
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
604 else
605 hlen = L2CAP_ENH_HDR_SIZE;
606
574 if (chan->fcs == L2CAP_FCS_CRC16) 607 if (chan->fcs == L2CAP_FCS_CRC16)
575 hlen += 2; 608 hlen += L2CAP_FCS_SIZE;
576 609
577 BT_DBG("chan %p, control 0x%2.2x", chan, control); 610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
578 611
579 count = min_t(unsigned int, conn->mtu, hlen); 612 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE; 613
614 control |= __set_sframe(chan);
581 615
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL; 617 control |= __set_ctrl_final(chan);
584 618
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) 619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL; 620 control |= __set_ctrl_poll(chan);
587 621
588 skb = bt_skb_alloc(count, GFP_ATOMIC); 622 skb = bt_skb_alloc(count, GFP_ATOMIC);
589 if (!skb) 623 if (!skb)
@@ -592,32 +626,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid); 628 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2)); 629
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
596 631
597 if (chan->fcs == L2CAP_FCS_CRC16) { 632 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2); 633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
599 put_unaligned_le16(fcs, skb_put(skb, 2)); 634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
600 } 635 }
601 636
602 if (lmp_no_flush_capable(conn->hcon->hdev)) 637 skb->priority = HCI_PRIO_MAX;
603 flags = ACL_START_NO_FLUSH; 638 l2cap_do_send(chan, skb);
604 else
605 flags = ACL_START;
606
607 bt_cb(skb)->force_active = chan->force_active;
608
609 hci_send_acl(chan->conn->hcon, skb, flags);
610} 639}
611 640
612static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) 641static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
613{ 642{
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY; 644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
616 set_bit(CONN_RNR_SENT, &chan->conn_state); 645 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else 646 } else
618 control |= L2CAP_SUPER_RCV_READY; 647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
619 648
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 649 control |= __set_reqseq(chan, chan->buffer_seq);
621 650
622 l2cap_send_sframe(chan, control); 651 l2cap_send_sframe(chan, control);
623} 652}
@@ -947,7 +976,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
947 list_for_each_entry(chan, &conn->chan_l, list) { 976 list_for_each_entry(chan, &conn->chan_l, list) {
948 struct sock *sk = chan->sk; 977 struct sock *sk = chan->sk;
949 978
950 if (chan->force_reliable) 979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
951 sk->sk_err = err; 980 sk->sk_err = err;
952 } 981 }
953 982
@@ -986,6 +1015,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
986 chan->ops->close(chan->data); 1015 chan->ops->close(chan->data);
987 } 1016 }
988 1017
1018 hci_chan_del(conn->hchan);
1019
989 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
990 del_timer_sync(&conn->info_timer); 1021 del_timer_sync(&conn->info_timer);
991 1022
@@ -1008,18 +1039,26 @@ static void security_timeout(unsigned long arg)
1008static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 1039static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1009{ 1040{
1010 struct l2cap_conn *conn = hcon->l2cap_data; 1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1011 1043
1012 if (conn || status) 1044 if (conn || status)
1013 return conn; 1045 return conn;
1014 1046
1047 hchan = hci_chan_create(hcon);
1048 if (!hchan)
1049 return NULL;
1050
1015 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); 1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1016 if (!conn) 1052 if (!conn) {
1053 hci_chan_del(hchan);
1017 return NULL; 1054 return NULL;
1055 }
1018 1056
1019 hcon->l2cap_data = conn; 1057 hcon->l2cap_data = conn;
1020 conn->hcon = hcon; 1058 conn->hcon = hcon;
1059 conn->hchan = hchan;
1021 1060
1022 BT_DBG("hcon %p conn %p", hcon, conn); 1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1023 1062
1024 if (hcon->hdev->le_mtu && hcon->type == LE_LINK) 1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1025 conn->mtu = hcon->hdev->le_mtu; 1064 conn->mtu = hcon->hdev->le_mtu;
@@ -1043,7 +1082,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1043 setup_timer(&conn->info_timer, l2cap_info_timeout, 1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1044 (unsigned long) conn); 1083 (unsigned long) conn);
1045 1084
1046 conn->disc_reason = 0x13; 1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1047 1086
1048 return conn; 1087 return conn;
1049} 1088}
@@ -1245,47 +1284,35 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1245 __clear_retrans_timer(chan); 1284 __clear_retrans_timer(chan);
1246} 1285}
1247 1286
1248static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249{
1250 struct hci_conn *hcon = chan->conn->hcon;
1251 u16 flags;
1252
1253 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254
1255 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1256 flags = ACL_START_NO_FLUSH;
1257 else
1258 flags = ACL_START;
1259
1260 bt_cb(skb)->force_active = chan->force_active;
1261 hci_send_acl(hcon, skb, flags);
1262}
1263
1264static void l2cap_streaming_send(struct l2cap_chan *chan) 1287static void l2cap_streaming_send(struct l2cap_chan *chan)
1265{ 1288{
1266 struct sk_buff *skb; 1289 struct sk_buff *skb;
1267 u16 control, fcs; 1290 u32 control;
1291 u16 fcs;
1268 1292
1269 while ((skb = skb_dequeue(&chan->tx_q))) { 1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1270 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); 1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1271 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1295 control |= __set_txseq(chan, chan->next_tx_seq);
1272 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); 1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1273 1297
1274 if (chan->fcs == L2CAP_FCS_CRC16) { 1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1275 fcs = crc16(0, (u8 *)skb->data, skb->len - 2); 1299 fcs = crc16(0, (u8 *)skb->data,
1276 put_unaligned_le16(fcs, skb->data + skb->len - 2); 1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1277 } 1303 }
1278 1304
1279 l2cap_do_send(chan, skb); 1305 l2cap_do_send(chan, skb);
1280 1306
1281 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; 1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1282 } 1308 }
1283} 1309}
1284 1310
1285static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) 1311static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1286{ 1312{
1287 struct sk_buff *skb, *tx_skb; 1313 struct sk_buff *skb, *tx_skb;
1288 u16 control, fcs; 1314 u16 fcs;
1315 u32 control;
1289 1316
1290 skb = skb_peek(&chan->tx_q); 1317 skb = skb_peek(&chan->tx_q);
1291 if (!skb) 1318 if (!skb)
@@ -1308,20 +1335,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1308 1335
1309 tx_skb = skb_clone(skb, GFP_ATOMIC); 1336 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 bt_cb(skb)->retries++; 1337 bt_cb(skb)->retries++;
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1338
1312 control &= L2CAP_CTRL_SAR; 1339 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1340 control &= __get_sar_mask(chan);
1313 1341
1314 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1342 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 control |= L2CAP_CTRL_FINAL; 1343 control |= __set_ctrl_final(chan);
1316 1344
1317 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1345 control |= __set_reqseq(chan, chan->buffer_seq);
1318 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1346 control |= __set_txseq(chan, tx_seq);
1319 1347
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1348 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1321 1349
1322 if (chan->fcs == L2CAP_FCS_CRC16) { 1350 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1351 fcs = crc16(0, (u8 *)tx_skb->data,
1324 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1352 tx_skb->len - L2CAP_FCS_SIZE);
1353 put_unaligned_le16(fcs,
1354 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1325 } 1355 }
1326 1356
1327 l2cap_do_send(chan, tx_skb); 1357 l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1360,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1330static int l2cap_ertm_send(struct l2cap_chan *chan) 1360static int l2cap_ertm_send(struct l2cap_chan *chan)
1331{ 1361{
1332 struct sk_buff *skb, *tx_skb; 1362 struct sk_buff *skb, *tx_skb;
1333 u16 control, fcs; 1363 u16 fcs;
1364 u32 control;
1334 int nsent = 0; 1365 int nsent = 0;
1335 1366
1336 if (chan->state != BT_CONNECTED) 1367 if (chan->state != BT_CONNECTED)
@@ -1348,20 +1379,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1348 1379
1349 bt_cb(skb)->retries++; 1380 bt_cb(skb)->retries++;
1350 1381
1351 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1352 control &= L2CAP_CTRL_SAR; 1383 control &= __get_sar_mask(chan);
1353 1384
1354 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 control |= L2CAP_CTRL_FINAL; 1386 control |= __set_ctrl_final(chan);
1356 1387
1357 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1388 control |= __set_reqseq(chan, chan->buffer_seq);
1358 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1389 control |= __set_txseq(chan, chan->next_tx_seq);
1359 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360 1390
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1361 1392
1362 if (chan->fcs == L2CAP_FCS_CRC16) { 1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); 1394 fcs = crc16(0, (u8 *)skb->data,
1364 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); 1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs, skb->data +
1397 tx_skb->len - L2CAP_FCS_SIZE);
1365 } 1398 }
1366 1399
1367 l2cap_do_send(chan, tx_skb); 1400 l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1402,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1369 __set_retrans_timer(chan); 1402 __set_retrans_timer(chan);
1370 1403
1371 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1404 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; 1405
1406 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1373 1407
1374 if (bt_cb(skb)->retries == 1) 1408 if (bt_cb(skb)->retries == 1)
1375 chan->unacked_frames++; 1409 chan->unacked_frames++;
@@ -1401,12 +1435,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1401 1435
1402static void l2cap_send_ack(struct l2cap_chan *chan) 1436static void l2cap_send_ack(struct l2cap_chan *chan)
1403{ 1437{
1404 u16 control = 0; 1438 u32 control = 0;
1405 1439
1406 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1440 control |= __set_reqseq(chan, chan->buffer_seq);
1407 1441
1408 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1442 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1409 control |= L2CAP_SUPER_RCV_NOT_READY; 1443 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1410 set_bit(CONN_RNR_SENT, &chan->conn_state); 1444 set_bit(CONN_RNR_SENT, &chan->conn_state);
1411 l2cap_send_sframe(chan, control); 1445 l2cap_send_sframe(chan, control);
1412 return; 1446 return;
@@ -1415,20 +1449,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
1415 if (l2cap_ertm_send(chan) > 0) 1449 if (l2cap_ertm_send(chan) > 0)
1416 return; 1450 return;
1417 1451
1418 control |= L2CAP_SUPER_RCV_READY; 1452 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1419 l2cap_send_sframe(chan, control); 1453 l2cap_send_sframe(chan, control);
1420} 1454}
1421 1455
1422static void l2cap_send_srejtail(struct l2cap_chan *chan) 1456static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423{ 1457{
1424 struct srej_list *tail; 1458 struct srej_list *tail;
1425 u16 control; 1459 u32 control;
1426 1460
1427 control = L2CAP_SUPER_SELECT_REJECT; 1461 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1428 control |= L2CAP_CTRL_FINAL; 1462 control |= __set_ctrl_final(chan);
1429 1463
1430 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1464 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1465 control |= __set_reqseq(chan, tail->tx_seq);
1432 1466
1433 l2cap_send_sframe(chan, control); 1467 l2cap_send_sframe(chan, control);
1434} 1468}
@@ -1456,6 +1490,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1456 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1457 return -EFAULT; 1491 return -EFAULT;
1458 1492
1493 (*frag)->priority = skb->priority;
1494
1459 sent += count; 1495 sent += count;
1460 len -= count; 1496 len -= count;
1461 1497
@@ -1465,15 +1501,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1465 return sent; 1501 return sent;
1466} 1502}
1467 1503
1468static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1504static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1505 struct msghdr *msg, size_t len,
1506 u32 priority)
1469{ 1507{
1470 struct sock *sk = chan->sk; 1508 struct sock *sk = chan->sk;
1471 struct l2cap_conn *conn = chan->conn; 1509 struct l2cap_conn *conn = chan->conn;
1472 struct sk_buff *skb; 1510 struct sk_buff *skb;
1473 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1511 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1474 struct l2cap_hdr *lh; 1512 struct l2cap_hdr *lh;
1475 1513
1476 BT_DBG("sk %p len %d", sk, (int)len); 1514 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1477 1515
1478 count = min_t(unsigned int, (conn->mtu - hlen), len); 1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1479 skb = bt_skb_send_alloc(sk, count + hlen, 1517 skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1519,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
1481 if (!skb) 1519 if (!skb)
1482 return ERR_PTR(err); 1520 return ERR_PTR(err);
1483 1521
1522 skb->priority = priority;
1523
1484 /* Create L2CAP header */ 1524 /* Create L2CAP header */
1485 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1486 lh->cid = cpu_to_le16(chan->dcid); 1526 lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1535,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
1495 return skb; 1535 return skb;
1496} 1536}
1497 1537
1498static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1538static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1539 struct msghdr *msg, size_t len,
1540 u32 priority)
1499{ 1541{
1500 struct sock *sk = chan->sk; 1542 struct sock *sk = chan->sk;
1501 struct l2cap_conn *conn = chan->conn; 1543 struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1553,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
1511 if (!skb) 1553 if (!skb)
1512 return ERR_PTR(err); 1554 return ERR_PTR(err);
1513 1555
1556 skb->priority = priority;
1557
1514 /* Create L2CAP header */ 1558 /* Create L2CAP header */
1515 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1516 lh->cid = cpu_to_le16(chan->dcid); 1560 lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1570,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
1526 1570
1527static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 1571static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1528 struct msghdr *msg, size_t len, 1572 struct msghdr *msg, size_t len,
1529 u16 control, u16 sdulen) 1573 u32 control, u16 sdulen)
1530{ 1574{
1531 struct sock *sk = chan->sk; 1575 struct sock *sk = chan->sk;
1532 struct l2cap_conn *conn = chan->conn; 1576 struct l2cap_conn *conn = chan->conn;
1533 struct sk_buff *skb; 1577 struct sk_buff *skb;
1534 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1578 int err, count, hlen;
1535 struct l2cap_hdr *lh; 1579 struct l2cap_hdr *lh;
1536 1580
1537 BT_DBG("sk %p len %d", sk, (int)len); 1581 BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1583,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1539 if (!conn) 1583 if (!conn)
1540 return ERR_PTR(-ENOTCONN); 1584 return ERR_PTR(-ENOTCONN);
1541 1585
1586 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1587 hlen = L2CAP_EXT_HDR_SIZE;
1588 else
1589 hlen = L2CAP_ENH_HDR_SIZE;
1590
1542 if (sdulen) 1591 if (sdulen)
1543 hlen += 2; 1592 hlen += L2CAP_SDULEN_SIZE;
1544 1593
1545 if (chan->fcs == L2CAP_FCS_CRC16) 1594 if (chan->fcs == L2CAP_FCS_CRC16)
1546 hlen += 2; 1595 hlen += L2CAP_FCS_SIZE;
1547 1596
1548 count = min_t(unsigned int, (conn->mtu - hlen), len); 1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen, 1598 skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1604,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1555 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1556 lh->cid = cpu_to_le16(chan->dcid); 1605 lh->cid = cpu_to_le16(chan->dcid);
1557 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1558 put_unaligned_le16(control, skb_put(skb, 2)); 1607
1608 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1609
1559 if (sdulen) 1610 if (sdulen)
1560 put_unaligned_le16(sdulen, skb_put(skb, 2)); 1611 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1561 1612
1562 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1613 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1563 if (unlikely(err < 0)) { 1614 if (unlikely(err < 0)) {
@@ -1566,7 +1617,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1566 } 1617 }
1567 1618
1568 if (chan->fcs == L2CAP_FCS_CRC16) 1619 if (chan->fcs == L2CAP_FCS_CRC16)
1569 put_unaligned_le16(0, skb_put(skb, 2)); 1620 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1570 1621
1571 bt_cb(skb)->retries = 0; 1622 bt_cb(skb)->retries = 0;
1572 return skb; 1623 return skb;
@@ -1576,11 +1627,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
1576{ 1627{
1577 struct sk_buff *skb; 1628 struct sk_buff *skb;
1578 struct sk_buff_head sar_queue; 1629 struct sk_buff_head sar_queue;
1579 u16 control; 1630 u32 control;
1580 size_t size = 0; 1631 size_t size = 0;
1581 1632
1582 skb_queue_head_init(&sar_queue); 1633 skb_queue_head_init(&sar_queue);
1583 control = L2CAP_SDU_START; 1634 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1584 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); 1635 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1585 if (IS_ERR(skb)) 1636 if (IS_ERR(skb))
1586 return PTR_ERR(skb); 1637 return PTR_ERR(skb);
@@ -1593,10 +1644,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
1593 size_t buflen; 1644 size_t buflen;
1594 1645
1595 if (len > chan->remote_mps) { 1646 if (len > chan->remote_mps) {
1596 control = L2CAP_SDU_CONTINUE; 1647 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1597 buflen = chan->remote_mps; 1648 buflen = chan->remote_mps;
1598 } else { 1649 } else {
1599 control = L2CAP_SDU_END; 1650 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1600 buflen = len; 1651 buflen = len;
1601 } 1652 }
1602 1653
@@ -1617,15 +1668,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
1617 return size; 1668 return size;
1618} 1669}
1619 1670
1620int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1671int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1672 u32 priority)
1621{ 1673{
1622 struct sk_buff *skb; 1674 struct sk_buff *skb;
1623 u16 control; 1675 u32 control;
1624 int err; 1676 int err;
1625 1677
1626 /* Connectionless channel */ 1678 /* Connectionless channel */
1627 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 1679 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1628 skb = l2cap_create_connless_pdu(chan, msg, len); 1680 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1629 if (IS_ERR(skb)) 1681 if (IS_ERR(skb))
1630 return PTR_ERR(skb); 1682 return PTR_ERR(skb);
1631 1683
@@ -1640,7 +1692,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1640 return -EMSGSIZE; 1692 return -EMSGSIZE;
1641 1693
1642 /* Create a basic PDU */ 1694 /* Create a basic PDU */
1643 skb = l2cap_create_basic_pdu(chan, msg, len); 1695 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1644 if (IS_ERR(skb)) 1696 if (IS_ERR(skb))
1645 return PTR_ERR(skb); 1697 return PTR_ERR(skb);
1646 1698
@@ -1652,7 +1704,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1652 case L2CAP_MODE_STREAMING: 1704 case L2CAP_MODE_STREAMING:
1653 /* Entire SDU fits into one PDU */ 1705 /* Entire SDU fits into one PDU */
1654 if (len <= chan->remote_mps) { 1706 if (len <= chan->remote_mps) {
1655 control = L2CAP_SDU_UNSEGMENTED; 1707 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1656 skb = l2cap_create_iframe_pdu(chan, msg, len, control, 1708 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1657 0); 1709 0);
1658 if (IS_ERR(skb)) 1710 if (IS_ERR(skb))
@@ -1850,6 +1902,37 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1850 *ptr += L2CAP_CONF_OPT_SIZE + len; 1902 *ptr += L2CAP_CONF_OPT_SIZE + len;
1851} 1903}
1852 1904
1905static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1906{
1907 struct l2cap_conf_efs efs;
1908
1909 switch(chan->mode) {
1910 case L2CAP_MODE_ERTM:
1911 efs.id = chan->local_id;
1912 efs.stype = chan->local_stype;
1913 efs.msdu = cpu_to_le16(chan->local_msdu);
1914 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1915 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1916 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1917 break;
1918
1919 case L2CAP_MODE_STREAMING:
1920 efs.id = 1;
1921 efs.stype = L2CAP_SERV_BESTEFFORT;
1922 efs.msdu = cpu_to_le16(chan->local_msdu);
1923 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1924 efs.acc_lat = 0;
1925 efs.flush_to = 0;
1926 break;
1927
1928 default:
1929 return;
1930 }
1931
1932 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1933 (unsigned long) &efs);
1934}
1935
1853static void l2cap_ack_timeout(unsigned long arg) 1936static void l2cap_ack_timeout(unsigned long arg)
1854{ 1937{
1855 struct l2cap_chan *chan = (void *) arg; 1938 struct l2cap_chan *chan = (void *) arg;
@@ -1896,11 +1979,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1896 } 1979 }
1897} 1980}
1898 1981
1982static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1983{
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1985}
1986
1987static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1988{
1989 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1990}
1991
1992static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1993{
1994 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1995 __l2cap_ews_supported(chan)) {
1996 /* use extended control field */
1997 set_bit(FLAG_EXT_CTRL, &chan->flags);
1998 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1999 } else {
2000 chan->tx_win = min_t(u16, chan->tx_win,
2001 L2CAP_DEFAULT_TX_WINDOW);
2002 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2003 }
2004}
2005
1899static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) 2006static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1900{ 2007{
1901 struct l2cap_conf_req *req = data; 2008 struct l2cap_conf_req *req = data;
1902 struct l2cap_conf_rfc rfc = { .mode = chan->mode }; 2009 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1903 void *ptr = req->data; 2010 void *ptr = req->data;
2011 u16 size;
1904 2012
1905 BT_DBG("chan %p", chan); 2013 BT_DBG("chan %p", chan);
1906 2014
@@ -1913,6 +2021,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1913 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) 2021 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1914 break; 2022 break;
1915 2023
2024 if (__l2cap_efs_supported(chan))
2025 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2026
1916 /* fall through */ 2027 /* fall through */
1917 default: 2028 default:
1918 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); 2029 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
@@ -1942,17 +2053,27 @@ done:
1942 2053
1943 case L2CAP_MODE_ERTM: 2054 case L2CAP_MODE_ERTM:
1944 rfc.mode = L2CAP_MODE_ERTM; 2055 rfc.mode = L2CAP_MODE_ERTM;
1945 rfc.txwin_size = chan->tx_win;
1946 rfc.max_transmit = chan->max_tx; 2056 rfc.max_transmit = chan->max_tx;
1947 rfc.retrans_timeout = 0; 2057 rfc.retrans_timeout = 0;
1948 rfc.monitor_timeout = 0; 2058 rfc.monitor_timeout = 0;
1949 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2059
1950 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) 2060 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
1951 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); 2061 L2CAP_EXT_HDR_SIZE -
2062 L2CAP_SDULEN_SIZE -
2063 L2CAP_FCS_SIZE);
2064 rfc.max_pdu_size = cpu_to_le16(size);
2065
2066 l2cap_txwin_setup(chan);
2067
2068 rfc.txwin_size = min_t(u16, chan->tx_win,
2069 L2CAP_DEFAULT_TX_WINDOW);
1952 2070
1953 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1954 (unsigned long) &rfc); 2072 (unsigned long) &rfc);
1955 2073
2074 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2075 l2cap_add_opt_efs(&ptr, chan);
2076
1956 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) 2077 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1957 break; 2078 break;
1958 2079
@@ -1961,6 +2082,10 @@ done:
1961 chan->fcs = L2CAP_FCS_NONE; 2082 chan->fcs = L2CAP_FCS_NONE;
1962 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1963 } 2084 }
2085
2086 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2088 chan->tx_win);
1964 break; 2089 break;
1965 2090
1966 case L2CAP_MODE_STREAMING: 2091 case L2CAP_MODE_STREAMING:
@@ -1969,13 +2094,19 @@ done:
1969 rfc.max_transmit = 0; 2094 rfc.max_transmit = 0;
1970 rfc.retrans_timeout = 0; 2095 rfc.retrans_timeout = 0;
1971 rfc.monitor_timeout = 0; 2096 rfc.monitor_timeout = 0;
1972 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2097
1973 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) 2098 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
1974 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); 2099 L2CAP_EXT_HDR_SIZE -
2100 L2CAP_SDULEN_SIZE -
2101 L2CAP_FCS_SIZE);
2102 rfc.max_pdu_size = cpu_to_le16(size);
1975 2103
1976 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1977 (unsigned long) &rfc); 2105 (unsigned long) &rfc);
1978 2106
2107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2108 l2cap_add_opt_efs(&ptr, chan);
2109
1979 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) 2110 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1980 break; 2111 break;
1981 2112
@@ -2002,8 +2133,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2002 int type, hint, olen; 2133 int type, hint, olen;
2003 unsigned long val; 2134 unsigned long val;
2004 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 2135 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2136 struct l2cap_conf_efs efs;
2137 u8 remote_efs = 0;
2005 u16 mtu = L2CAP_DEFAULT_MTU; 2138 u16 mtu = L2CAP_DEFAULT_MTU;
2006 u16 result = L2CAP_CONF_SUCCESS; 2139 u16 result = L2CAP_CONF_SUCCESS;
2140 u16 size;
2007 2141
2008 BT_DBG("chan %p", chan); 2142 BT_DBG("chan %p", chan);
2009 2143
@@ -2033,7 +2167,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2033 case L2CAP_CONF_FCS: 2167 case L2CAP_CONF_FCS:
2034 if (val == L2CAP_FCS_NONE) 2168 if (val == L2CAP_FCS_NONE)
2035 set_bit(CONF_NO_FCS_RECV, &chan->conf_state); 2169 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2170 break;
2171
2172 case L2CAP_CONF_EFS:
2173 remote_efs = 1;
2174 if (olen == sizeof(efs))
2175 memcpy(&efs, (void *) val, olen);
2176 break;
2177
2178 case L2CAP_CONF_EWS:
2179 if (!enable_hs)
2180 return -ECONNREFUSED;
2036 2181
2182 set_bit(FLAG_EXT_CTRL, &chan->flags);
2183 set_bit(CONF_EWS_RECV, &chan->conf_state);
2184 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2185 chan->remote_tx_win = val;
2037 break; 2186 break;
2038 2187
2039 default: 2188 default:
@@ -2058,6 +2207,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2058 break; 2207 break;
2059 } 2208 }
2060 2209
2210 if (remote_efs) {
2211 if (__l2cap_efs_supported(chan))
2212 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2213 else
2214 return -ECONNREFUSED;
2215 }
2216
2061 if (chan->mode != rfc.mode) 2217 if (chan->mode != rfc.mode)
2062 return -ECONNREFUSED; 2218 return -ECONNREFUSED;
2063 2219
@@ -2076,7 +2232,6 @@ done:
2076 sizeof(rfc), (unsigned long) &rfc); 2232 sizeof(rfc), (unsigned long) &rfc);
2077 } 2233 }
2078 2234
2079
2080 if (result == L2CAP_CONF_SUCCESS) { 2235 if (result == L2CAP_CONF_SUCCESS) {
2081 /* Configure output options and let the other side know 2236 /* Configure output options and let the other side know
2082 * which ones we don't like. */ 2237 * which ones we don't like. */
@@ -2089,6 +2244,26 @@ done:
2089 } 2244 }
2090 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2091 2246
2247 if (remote_efs) {
2248 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2249 efs.stype != L2CAP_SERV_NOTRAFIC &&
2250 efs.stype != chan->local_stype) {
2251
2252 result = L2CAP_CONF_UNACCEPT;
2253
2254 if (chan->num_conf_req >= 1)
2255 return -ECONNREFUSED;
2256
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2258 sizeof(efs),
2259 (unsigned long) &efs);
2260 } else {
2261 /* Send PENDING Conf Rsp */
2262 result = L2CAP_CONF_PENDING;
2263 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2264 }
2265 }
2266
2092 switch (rfc.mode) { 2267 switch (rfc.mode) {
2093 case L2CAP_MODE_BASIC: 2268 case L2CAP_MODE_BASIC:
2094 chan->fcs = L2CAP_FCS_NONE; 2269 chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2271,20 @@ done:
2096 break; 2271 break;
2097 2272
2098 case L2CAP_MODE_ERTM: 2273 case L2CAP_MODE_ERTM:
2099 chan->remote_tx_win = rfc.txwin_size; 2274 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2100 chan->remote_max_tx = rfc.max_transmit; 2275 chan->remote_tx_win = rfc.txwin_size;
2276 else
2277 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2101 2278
2102 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) 2279 chan->remote_max_tx = rfc.max_transmit;
2103 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104 2280
2105 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2281 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2282 chan->conn->mtu -
2283 L2CAP_EXT_HDR_SIZE -
2284 L2CAP_SDULEN_SIZE -
2285 L2CAP_FCS_SIZE);
2286 rfc.max_pdu_size = cpu_to_le16(size);
2287 chan->remote_mps = size;
2106 2288
2107 rfc.retrans_timeout = 2289 rfc.retrans_timeout =
2108 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 2290 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2296,29 @@ done:
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2115 sizeof(rfc), (unsigned long) &rfc); 2297 sizeof(rfc), (unsigned long) &rfc);
2116 2298
2299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2300 chan->remote_id = efs.id;
2301 chan->remote_stype = efs.stype;
2302 chan->remote_msdu = le16_to_cpu(efs.msdu);
2303 chan->remote_flush_to =
2304 le32_to_cpu(efs.flush_to);
2305 chan->remote_acc_lat =
2306 le32_to_cpu(efs.acc_lat);
2307 chan->remote_sdu_itime =
2308 le32_to_cpu(efs.sdu_itime);
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs), (unsigned long) &efs);
2311 }
2117 break; 2312 break;
2118 2313
2119 case L2CAP_MODE_STREAMING: 2314 case L2CAP_MODE_STREAMING:
2120 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) 2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2121 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); 2316 chan->conn->mtu -
2122 2317 L2CAP_EXT_HDR_SIZE -
2123 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2318 L2CAP_SDULEN_SIZE -
2319 L2CAP_FCS_SIZE);
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2124 2322
2125 set_bit(CONF_MODE_DONE, &chan->conf_state); 2323 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126 2324
@@ -2153,6 +2351,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2153 int type, olen; 2351 int type, olen;
2154 unsigned long val; 2352 unsigned long val;
2155 struct l2cap_conf_rfc rfc; 2353 struct l2cap_conf_rfc rfc;
2354 struct l2cap_conf_efs efs;
2156 2355
2157 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); 2356 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158 2357
@@ -2188,6 +2387,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2188 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2189 sizeof(rfc), (unsigned long) &rfc); 2388 sizeof(rfc), (unsigned long) &rfc);
2190 break; 2389 break;
2390
2391 case L2CAP_CONF_EWS:
2392 chan->tx_win = min_t(u16, val,
2393 L2CAP_DEFAULT_EXT_WINDOW);
2394 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2395 chan->tx_win);
2396 break;
2397
2398 case L2CAP_CONF_EFS:
2399 if (olen == sizeof(efs))
2400 memcpy(&efs, (void *)val, olen);
2401
2402 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2403 efs.stype != L2CAP_SERV_NOTRAFIC &&
2404 efs.stype != chan->local_stype)
2405 return -ECONNREFUSED;
2406
2407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2408 sizeof(efs), (unsigned long) &efs);
2409 break;
2191 } 2410 }
2192 } 2411 }
2193 2412
@@ -2196,13 +2415,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2196 2415
2197 chan->mode = rfc.mode; 2416 chan->mode = rfc.mode;
2198 2417
2199 if (*result == L2CAP_CONF_SUCCESS) { 2418 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2200 switch (rfc.mode) { 2419 switch (rfc.mode) {
2201 case L2CAP_MODE_ERTM: 2420 case L2CAP_MODE_ERTM:
2202 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 2421 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2203 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 2422 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2204 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2423 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2424
2425 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2426 chan->local_msdu = le16_to_cpu(efs.msdu);
2427 chan->local_sdu_itime =
2428 le32_to_cpu(efs.sdu_itime);
2429 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2430 chan->local_flush_to =
2431 le32_to_cpu(efs.flush_to);
2432 }
2205 break; 2433 break;
2434
2206 case L2CAP_MODE_STREAMING: 2435 case L2CAP_MODE_STREAMING:
2207 chan->mps = le16_to_cpu(rfc.max_pdu_size); 2436 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2208 } 2437 }
@@ -2330,7 +2559,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2330 /* Check if the ACL is secure enough (if not SDP) */ 2559 /* Check if the ACL is secure enough (if not SDP) */
2331 if (psm != cpu_to_le16(0x0001) && 2560 if (psm != cpu_to_le16(0x0001) &&
2332 !hci_conn_check_link_mode(conn->hcon)) { 2561 !hci_conn_check_link_mode(conn->hcon)) {
2333 conn->disc_reason = 0x05; 2562 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2334 result = L2CAP_CR_SEC_BLOCK; 2563 result = L2CAP_CR_SEC_BLOCK;
2335 goto response; 2564 goto response;
2336 } 2565 }
@@ -2602,6 +2831,21 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2602 chan->num_conf_req++; 2831 chan->num_conf_req++;
2603 } 2832 }
2604 2833
 2834 /* Got Conf Rsp PENDING from remote side and assume we sent
2835 Conf Rsp PENDING in the code above */
2836 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2837 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2838
2839 /* check compatibility */
2840
2841 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2842 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2843
2844 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2845 l2cap_build_conf_rsp(chan, rsp,
2846 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2847 }
2848
2605unlock: 2849unlock:
2606 bh_unlock_sock(sk); 2850 bh_unlock_sock(sk);
2607 return 0; 2851 return 0;
@@ -2631,8 +2875,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2631 switch (result) { 2875 switch (result) {
2632 case L2CAP_CONF_SUCCESS: 2876 case L2CAP_CONF_SUCCESS:
2633 l2cap_conf_rfc_get(chan, rsp->data, len); 2877 l2cap_conf_rfc_get(chan, rsp->data, len);
2878 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2634 break; 2879 break;
2635 2880
2881 case L2CAP_CONF_PENDING:
2882 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2883
2884 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2885 char buf[64];
2886
2887 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2888 buf, &result);
2889 if (len < 0) {
2890 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2891 goto done;
2892 }
2893
2894 /* check compatibility */
2895
2896 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2897 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2898
2899 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2900 l2cap_build_conf_rsp(chan, buf,
2901 L2CAP_CONF_SUCCESS, 0x0000), buf);
2902 }
2903 goto done;
2904
2636 case L2CAP_CONF_UNACCEPT: 2905 case L2CAP_CONF_UNACCEPT:
2637 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 2906 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2638 char req[64]; 2907 char req[64];
@@ -2782,15 +3051,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
2782 if (!disable_ertm) 3051 if (!disable_ertm)
2783 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3052 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2784 | L2CAP_FEAT_FCS; 3053 | L2CAP_FEAT_FCS;
3054 if (enable_hs)
3055 feat_mask |= L2CAP_FEAT_EXT_FLOW
3056 | L2CAP_FEAT_EXT_WINDOW;
3057
2785 put_unaligned_le32(feat_mask, rsp->data); 3058 put_unaligned_le32(feat_mask, rsp->data);
2786 l2cap_send_cmd(conn, cmd->ident, 3059 l2cap_send_cmd(conn, cmd->ident,
2787 L2CAP_INFO_RSP, sizeof(buf), buf); 3060 L2CAP_INFO_RSP, sizeof(buf), buf);
2788 } else if (type == L2CAP_IT_FIXED_CHAN) { 3061 } else if (type == L2CAP_IT_FIXED_CHAN) {
2789 u8 buf[12]; 3062 u8 buf[12];
2790 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3063 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3064
3065 if (enable_hs)
3066 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3067 else
3068 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3069
2791 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3070 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2792 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3071 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2793 memcpy(buf + 4, l2cap_fixed_chan, 8); 3072 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
2794 l2cap_send_cmd(conn, cmd->ident, 3073 l2cap_send_cmd(conn, cmd->ident,
2795 L2CAP_INFO_RSP, sizeof(buf), buf); 3074 L2CAP_INFO_RSP, sizeof(buf), buf);
2796 } else { 3075 } else {
@@ -2857,6 +3136,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
2857 return 0; 3136 return 0;
2858} 3137}
2859 3138
3139static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3140 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3141 void *data)
3142{
3143 struct l2cap_create_chan_req *req = data;
3144 struct l2cap_create_chan_rsp rsp;
3145 u16 psm, scid;
3146
3147 if (cmd_len != sizeof(*req))
3148 return -EPROTO;
3149
3150 if (!enable_hs)
3151 return -EINVAL;
3152
3153 psm = le16_to_cpu(req->psm);
3154 scid = le16_to_cpu(req->scid);
3155
3156 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3157
3158 /* Placeholder: Always reject */
3159 rsp.dcid = 0;
3160 rsp.scid = cpu_to_le16(scid);
3161 rsp.result = L2CAP_CR_NO_MEM;
3162 rsp.status = L2CAP_CS_NO_INFO;
3163
3164 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3165 sizeof(rsp), &rsp);
3166
3167 return 0;
3168}
3169
3170static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, void *data)
3172{
3173 BT_DBG("conn %p", conn);
3174
3175 return l2cap_connect_rsp(conn, cmd, data);
3176}
3177
3178static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3179 u16 icid, u16 result)
3180{
3181 struct l2cap_move_chan_rsp rsp;
3182
3183 BT_DBG("icid %d, result %d", icid, result);
3184
3185 rsp.icid = cpu_to_le16(icid);
3186 rsp.result = cpu_to_le16(result);
3187
3188 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3189}
3190
3191static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3192 struct l2cap_chan *chan, u16 icid, u16 result)
3193{
3194 struct l2cap_move_chan_cfm cfm;
3195 u8 ident;
3196
3197 BT_DBG("icid %d, result %d", icid, result);
3198
3199 ident = l2cap_get_ident(conn);
3200 if (chan)
3201 chan->ident = ident;
3202
3203 cfm.icid = cpu_to_le16(icid);
3204 cfm.result = cpu_to_le16(result);
3205
3206 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3207}
3208
3209static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3210 u16 icid)
3211{
3212 struct l2cap_move_chan_cfm_rsp rsp;
3213
3214 BT_DBG("icid %d", icid);
3215
3216 rsp.icid = cpu_to_le16(icid);
3217 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3218}
3219
3220static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3221 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3222{
3223 struct l2cap_move_chan_req *req = data;
3224 u16 icid = 0;
3225 u16 result = L2CAP_MR_NOT_ALLOWED;
3226
3227 if (cmd_len != sizeof(*req))
3228 return -EPROTO;
3229
3230 icid = le16_to_cpu(req->icid);
3231
3232 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3233
3234 if (!enable_hs)
3235 return -EINVAL;
3236
3237 /* Placeholder: Always refuse */
3238 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3239
3240 return 0;
3241}
3242
3243static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3245{
3246 struct l2cap_move_chan_rsp *rsp = data;
3247 u16 icid, result;
3248
3249 if (cmd_len != sizeof(*rsp))
3250 return -EPROTO;
3251
3252 icid = le16_to_cpu(rsp->icid);
3253 result = le16_to_cpu(rsp->result);
3254
3255 BT_DBG("icid %d, result %d", icid, result);
3256
3257 /* Placeholder: Always unconfirmed */
3258 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3259
3260 return 0;
3261}
3262
3263static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3264 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3265{
3266 struct l2cap_move_chan_cfm *cfm = data;
3267 u16 icid, result;
3268
3269 if (cmd_len != sizeof(*cfm))
3270 return -EPROTO;
3271
3272 icid = le16_to_cpu(cfm->icid);
3273 result = le16_to_cpu(cfm->result);
3274
3275 BT_DBG("icid %d, result %d", icid, result);
3276
3277 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3278
3279 return 0;
3280}
3281
3282static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3283 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3284{
3285 struct l2cap_move_chan_cfm_rsp *rsp = data;
3286 u16 icid;
3287
3288 if (cmd_len != sizeof(*rsp))
3289 return -EPROTO;
3290
3291 icid = le16_to_cpu(rsp->icid);
3292
3293 BT_DBG("icid %d", icid);
3294
3295 return 0;
3296}
3297
2860static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, 3298static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2861 u16 to_multiplier) 3299 u16 to_multiplier)
2862{ 3300{
@@ -2969,6 +3407,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2969 err = l2cap_information_rsp(conn, cmd, data); 3407 err = l2cap_information_rsp(conn, cmd, data);
2970 break; 3408 break;
2971 3409
3410 case L2CAP_CREATE_CHAN_REQ:
3411 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3412 break;
3413
3414 case L2CAP_CREATE_CHAN_RSP:
3415 err = l2cap_create_channel_rsp(conn, cmd, data);
3416 break;
3417
3418 case L2CAP_MOVE_CHAN_REQ:
3419 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3420 break;
3421
3422 case L2CAP_MOVE_CHAN_RSP:
3423 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3424 break;
3425
3426 case L2CAP_MOVE_CHAN_CFM:
3427 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3428 break;
3429
3430 case L2CAP_MOVE_CHAN_CFM_RSP:
3431 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3432 break;
3433
2972 default: 3434 default:
2973 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); 3435 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2974 err = -EINVAL; 3436 err = -EINVAL;
@@ -3047,10 +3509,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3047static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) 3509static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3048{ 3510{
3049 u16 our_fcs, rcv_fcs; 3511 u16 our_fcs, rcv_fcs;
3050 int hdr_size = L2CAP_HDR_SIZE + 2; 3512 int hdr_size;
3513
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 hdr_size = L2CAP_EXT_HDR_SIZE;
3516 else
3517 hdr_size = L2CAP_ENH_HDR_SIZE;
3051 3518
3052 if (chan->fcs == L2CAP_FCS_CRC16) { 3519 if (chan->fcs == L2CAP_FCS_CRC16) {
3053 skb_trim(skb, skb->len - 2); 3520 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3054 rcv_fcs = get_unaligned_le16(skb->data + skb->len); 3521 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3055 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 3522 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3056 3523
@@ -3062,14 +3529,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3062 3529
3063static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 3530static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3064{ 3531{
3065 u16 control = 0; 3532 u32 control = 0;
3066 3533
3067 chan->frames_sent = 0; 3534 chan->frames_sent = 0;
3068 3535
3069 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3536 control |= __set_reqseq(chan, chan->buffer_seq);
3070 3537
3071 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 3538 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3072 control |= L2CAP_SUPER_RCV_NOT_READY; 3539 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3073 l2cap_send_sframe(chan, control); 3540 l2cap_send_sframe(chan, control);
3074 set_bit(CONN_RNR_SENT, &chan->conn_state); 3541 set_bit(CONN_RNR_SENT, &chan->conn_state);
3075 } 3542 }
@@ -3081,12 +3548,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3081 3548
3082 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 3549 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3083 chan->frames_sent == 0) { 3550 chan->frames_sent == 0) {
3084 control |= L2CAP_SUPER_RCV_READY; 3551 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3085 l2cap_send_sframe(chan, control); 3552 l2cap_send_sframe(chan, control);
3086 } 3553 }
3087} 3554}
3088 3555
3089static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar) 3556static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3090{ 3557{
3091 struct sk_buff *next_skb; 3558 struct sk_buff *next_skb;
3092 int tx_seq_offset, next_tx_seq_offset; 3559 int tx_seq_offset, next_tx_seq_offset;
@@ -3100,18 +3567,14 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
3100 return 0; 3567 return 0;
3101 } 3568 }
3102 3569
3103 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; 3570 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3104 if (tx_seq_offset < 0)
3105 tx_seq_offset += 64;
3106 3571
3107 do { 3572 do {
3108 if (bt_cb(next_skb)->tx_seq == tx_seq) 3573 if (bt_cb(next_skb)->tx_seq == tx_seq)
3109 return -EINVAL; 3574 return -EINVAL;
3110 3575
3111 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - 3576 next_tx_seq_offset = __seq_offset(chan,
3112 chan->buffer_seq) % 64; 3577 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3113 if (next_tx_seq_offset < 0)
3114 next_tx_seq_offset += 64;
3115 3578
3116 if (next_tx_seq_offset > tx_seq_offset) { 3579 if (next_tx_seq_offset > tx_seq_offset) {
3117 __skb_queue_before(&chan->srej_q, next_skb, skb); 3580 __skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3147,24 +3610,24 @@ static void append_skb_frag(struct sk_buff *skb,
3147 skb->truesize += new_frag->truesize; 3610 skb->truesize += new_frag->truesize;
3148} 3611}
3149 3612
3150static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3613static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3151{ 3614{
3152 int err = -EINVAL; 3615 int err = -EINVAL;
3153 3616
3154 switch (control & L2CAP_CTRL_SAR) { 3617 switch (__get_ctrl_sar(chan, control)) {
3155 case L2CAP_SDU_UNSEGMENTED: 3618 case L2CAP_SAR_UNSEGMENTED:
3156 if (chan->sdu) 3619 if (chan->sdu)
3157 break; 3620 break;
3158 3621
3159 err = chan->ops->recv(chan->data, skb); 3622 err = chan->ops->recv(chan->data, skb);
3160 break; 3623 break;
3161 3624
3162 case L2CAP_SDU_START: 3625 case L2CAP_SAR_START:
3163 if (chan->sdu) 3626 if (chan->sdu)
3164 break; 3627 break;
3165 3628
3166 chan->sdu_len = get_unaligned_le16(skb->data); 3629 chan->sdu_len = get_unaligned_le16(skb->data);
3167 skb_pull(skb, 2); 3630 skb_pull(skb, L2CAP_SDULEN_SIZE);
3168 3631
3169 if (chan->sdu_len > chan->imtu) { 3632 if (chan->sdu_len > chan->imtu) {
3170 err = -EMSGSIZE; 3633 err = -EMSGSIZE;
@@ -3181,7 +3644,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
3181 err = 0; 3644 err = 0;
3182 break; 3645 break;
3183 3646
3184 case L2CAP_SDU_CONTINUE: 3647 case L2CAP_SAR_CONTINUE:
3185 if (!chan->sdu) 3648 if (!chan->sdu)
3186 break; 3649 break;
3187 3650
@@ -3195,7 +3658,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
3195 err = 0; 3658 err = 0;
3196 break; 3659 break;
3197 3660
3198 case L2CAP_SDU_END: 3661 case L2CAP_SAR_END:
3199 if (!chan->sdu) 3662 if (!chan->sdu)
3200 break; 3663 break;
3201 3664
@@ -3230,14 +3693,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
3230 3693
3231static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 3694static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3232{ 3695{
3233 u16 control; 3696 u32 control;
3234 3697
3235 BT_DBG("chan %p, Enter local busy", chan); 3698 BT_DBG("chan %p, Enter local busy", chan);
3236 3699
3237 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 3700 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3238 3701
3239 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3702 control = __set_reqseq(chan, chan->buffer_seq);
3240 control |= L2CAP_SUPER_RCV_NOT_READY; 3703 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3241 l2cap_send_sframe(chan, control); 3704 l2cap_send_sframe(chan, control);
3242 3705
3243 set_bit(CONN_RNR_SENT, &chan->conn_state); 3706 set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3247,13 +3710,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3247 3710
3248static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 3711static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3249{ 3712{
3250 u16 control; 3713 u32 control;
3251 3714
3252 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 3715 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3253 goto done; 3716 goto done;
3254 3717
3255 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3718 control = __set_reqseq(chan, chan->buffer_seq);
3256 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; 3719 control |= __set_ctrl_poll(chan);
3720 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3257 l2cap_send_sframe(chan, control); 3721 l2cap_send_sframe(chan, control);
3258 chan->retry_count = 1; 3722 chan->retry_count = 1;
3259 3723
@@ -3279,10 +3743,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3279 } 3743 }
3280} 3744}
3281 3745
3282static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) 3746static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3283{ 3747{
3284 struct sk_buff *skb; 3748 struct sk_buff *skb;
3285 u16 control; 3749 u32 control;
3286 3750
3287 while ((skb = skb_peek(&chan->srej_q)) && 3751 while ((skb = skb_peek(&chan->srej_q)) &&
3288 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 3752 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3292,7 +3756,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3292 break; 3756 break;
3293 3757
3294 skb = skb_dequeue(&chan->srej_q); 3758 skb = skb_dequeue(&chan->srej_q);
3295 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3759 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3296 err = l2cap_reassemble_sdu(chan, skb, control); 3760 err = l2cap_reassemble_sdu(chan, skb, control);
3297 3761
3298 if (err < 0) { 3762 if (err < 0) {
@@ -3300,16 +3764,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3300 break; 3764 break;
3301 } 3765 }
3302 3766
3303 chan->buffer_seq_srej = 3767 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3304 (chan->buffer_seq_srej + 1) % 64; 3768 tx_seq = __next_seq(chan, tx_seq);
3305 tx_seq = (tx_seq + 1) % 64;
3306 } 3769 }
3307} 3770}
3308 3771
3309static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) 3772static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3310{ 3773{
3311 struct srej_list *l, *tmp; 3774 struct srej_list *l, *tmp;
3312 u16 control; 3775 u32 control;
3313 3776
3314 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 3777 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3315 if (l->tx_seq == tx_seq) { 3778 if (l->tx_seq == tx_seq) {
@@ -3317,45 +3780,48 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3317 kfree(l); 3780 kfree(l);
3318 return; 3781 return;
3319 } 3782 }
3320 control = L2CAP_SUPER_SELECT_REJECT; 3783 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3321 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3784 control |= __set_reqseq(chan, l->tx_seq);
3322 l2cap_send_sframe(chan, control); 3785 l2cap_send_sframe(chan, control);
3323 list_del(&l->list); 3786 list_del(&l->list);
3324 list_add_tail(&l->list, &chan->srej_l); 3787 list_add_tail(&l->list, &chan->srej_l);
3325 } 3788 }
3326} 3789}
3327 3790
3328static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq) 3791static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3329{ 3792{
3330 struct srej_list *new; 3793 struct srej_list *new;
3331 u16 control; 3794 u32 control;
3332 3795
3333 while (tx_seq != chan->expected_tx_seq) { 3796 while (tx_seq != chan->expected_tx_seq) {
3334 control = L2CAP_SUPER_SELECT_REJECT; 3797 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3335 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3798 control |= __set_reqseq(chan, chan->expected_tx_seq);
3336 l2cap_send_sframe(chan, control); 3799 l2cap_send_sframe(chan, control);
3337 3800
3338 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 3801 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3339 new->tx_seq = chan->expected_tx_seq; 3802 new->tx_seq = chan->expected_tx_seq;
3340 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3803
3804 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3805
3341 list_add_tail(&new->list, &chan->srej_l); 3806 list_add_tail(&new->list, &chan->srej_l);
3342 } 3807 }
3343 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3808
3809 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3344} 3810}
3345 3811
3346static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) 3812static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3347{ 3813{
3348 u8 tx_seq = __get_txseq(rx_control); 3814 u16 tx_seq = __get_txseq(chan, rx_control);
3349 u8 req_seq = __get_reqseq(rx_control); 3815 u16 req_seq = __get_reqseq(chan, rx_control);
3350 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; 3816 u8 sar = __get_ctrl_sar(chan, rx_control);
3351 int tx_seq_offset, expected_tx_seq_offset; 3817 int tx_seq_offset, expected_tx_seq_offset;
3352 int num_to_ack = (chan->tx_win/6) + 1; 3818 int num_to_ack = (chan->tx_win/6) + 1;
3353 int err = 0; 3819 int err = 0;
3354 3820
3355 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len, 3821 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3356 tx_seq, rx_control); 3822 tx_seq, rx_control);
3357 3823
3358 if (L2CAP_CTRL_FINAL & rx_control && 3824 if (__is_ctrl_final(chan, rx_control) &&
3359 test_bit(CONN_WAIT_F, &chan->conn_state)) { 3825 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3360 __clear_monitor_timer(chan); 3826 __clear_monitor_timer(chan);
3361 if (chan->unacked_frames > 0) 3827 if (chan->unacked_frames > 0)
@@ -3366,9 +3832,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3366 chan->expected_ack_seq = req_seq; 3832 chan->expected_ack_seq = req_seq;
3367 l2cap_drop_acked_frames(chan); 3833 l2cap_drop_acked_frames(chan);
3368 3834
3369 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; 3835 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3370 if (tx_seq_offset < 0)
3371 tx_seq_offset += 64;
3372 3836
3373 /* invalid tx_seq */ 3837 /* invalid tx_seq */
3374 if (tx_seq_offset >= chan->tx_win) { 3838 if (tx_seq_offset >= chan->tx_win) {
@@ -3416,10 +3880,8 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3416 l2cap_send_srejframe(chan, tx_seq); 3880 l2cap_send_srejframe(chan, tx_seq);
3417 } 3881 }
3418 } else { 3882 } else {
3419 expected_tx_seq_offset = 3883 expected_tx_seq_offset = __seq_offset(chan,
3420 (chan->expected_tx_seq - chan->buffer_seq) % 64; 3884 chan->expected_tx_seq, chan->buffer_seq);
3421 if (expected_tx_seq_offset < 0)
3422 expected_tx_seq_offset += 64;
3423 3885
3424 /* duplicated tx_seq */ 3886 /* duplicated tx_seq */
3425 if (tx_seq_offset < expected_tx_seq_offset) 3887 if (tx_seq_offset < expected_tx_seq_offset)
@@ -3444,7 +3906,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3444 return 0; 3906 return 0;
3445 3907
3446expected: 3908expected:
3447 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3909 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3448 3910
3449 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 3911 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3450 bt_cb(skb)->tx_seq = tx_seq; 3912 bt_cb(skb)->tx_seq = tx_seq;
@@ -3454,13 +3916,14 @@ expected:
3454 } 3916 }
3455 3917
3456 err = l2cap_reassemble_sdu(chan, skb, rx_control); 3918 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3457 chan->buffer_seq = (chan->buffer_seq + 1) % 64; 3919 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3920
3458 if (err < 0) { 3921 if (err < 0) {
3459 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 3922 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3460 return err; 3923 return err;
3461 } 3924 }
3462 3925
3463 if (rx_control & L2CAP_CTRL_FINAL) { 3926 if (__is_ctrl_final(chan, rx_control)) {
3464 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 3927 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3465 l2cap_retransmit_frames(chan); 3928 l2cap_retransmit_frames(chan);
3466 } 3929 }
@@ -3478,15 +3941,15 @@ drop:
3478 return 0; 3941 return 0;
3479} 3942}
3480 3943
3481static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control) 3944static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3482{ 3945{
3483 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control), 3946 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3484 rx_control); 3947 __get_reqseq(chan, rx_control), rx_control);
3485 3948
3486 chan->expected_ack_seq = __get_reqseq(rx_control); 3949 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3487 l2cap_drop_acked_frames(chan); 3950 l2cap_drop_acked_frames(chan);
3488 3951
3489 if (rx_control & L2CAP_CTRL_POLL) { 3952 if (__is_ctrl_poll(chan, rx_control)) {
3490 set_bit(CONN_SEND_FBIT, &chan->conn_state); 3953 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3491 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 3954 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3492 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 3955 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3499,7 +3962,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
3499 l2cap_send_i_or_rr_or_rnr(chan); 3962 l2cap_send_i_or_rr_or_rnr(chan);
3500 } 3963 }
3501 3964
3502 } else if (rx_control & L2CAP_CTRL_FINAL) { 3965 } else if (__is_ctrl_final(chan, rx_control)) {
3503 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 3966 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3504 3967
3505 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 3968 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3518,18 +3981,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
3518 } 3981 }
3519} 3982}
3520 3983
3521static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control) 3984static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3522{ 3985{
3523 u8 tx_seq = __get_reqseq(rx_control); 3986 u16 tx_seq = __get_reqseq(chan, rx_control);
3524 3987
3525 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3988 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3526 3989
3527 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 3990 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3528 3991
3529 chan->expected_ack_seq = tx_seq; 3992 chan->expected_ack_seq = tx_seq;
3530 l2cap_drop_acked_frames(chan); 3993 l2cap_drop_acked_frames(chan);
3531 3994
3532 if (rx_control & L2CAP_CTRL_FINAL) { 3995 if (__is_ctrl_final(chan, rx_control)) {
3533 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 3996 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3534 l2cap_retransmit_frames(chan); 3997 l2cap_retransmit_frames(chan);
3535 } else { 3998 } else {
@@ -3539,15 +4002,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
3539 set_bit(CONN_REJ_ACT, &chan->conn_state); 4002 set_bit(CONN_REJ_ACT, &chan->conn_state);
3540 } 4003 }
3541} 4004}
3542static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) 4005static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3543{ 4006{
3544 u8 tx_seq = __get_reqseq(rx_control); 4007 u16 tx_seq = __get_reqseq(chan, rx_control);
3545 4008
3546 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 4009 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3547 4010
3548 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4011 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3549 4012
3550 if (rx_control & L2CAP_CTRL_POLL) { 4013 if (__is_ctrl_poll(chan, rx_control)) {
3551 chan->expected_ack_seq = tx_seq; 4014 chan->expected_ack_seq = tx_seq;
3552 l2cap_drop_acked_frames(chan); 4015 l2cap_drop_acked_frames(chan);
3553 4016
@@ -3560,7 +4023,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
3560 chan->srej_save_reqseq = tx_seq; 4023 chan->srej_save_reqseq = tx_seq;
3561 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4024 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3562 } 4025 }
3563 } else if (rx_control & L2CAP_CTRL_FINAL) { 4026 } else if (__is_ctrl_final(chan, rx_control)) {
3564 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && 4027 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3565 chan->srej_save_reqseq == tx_seq) 4028 chan->srej_save_reqseq == tx_seq)
3566 clear_bit(CONN_SREJ_ACT, &chan->conn_state); 4029 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3575,37 +4038,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
3575 } 4038 }
3576} 4039}
3577 4040
3578static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control) 4041static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3579{ 4042{
3580 u8 tx_seq = __get_reqseq(rx_control); 4043 u16 tx_seq = __get_reqseq(chan, rx_control);
3581 4044
3582 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 4045 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3583 4046
3584 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4047 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3585 chan->expected_ack_seq = tx_seq; 4048 chan->expected_ack_seq = tx_seq;
3586 l2cap_drop_acked_frames(chan); 4049 l2cap_drop_acked_frames(chan);
3587 4050
3588 if (rx_control & L2CAP_CTRL_POLL) 4051 if (__is_ctrl_poll(chan, rx_control))
3589 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4052 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3590 4053
3591 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4054 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3592 __clear_retrans_timer(chan); 4055 __clear_retrans_timer(chan);
3593 if (rx_control & L2CAP_CTRL_POLL) 4056 if (__is_ctrl_poll(chan, rx_control))
3594 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); 4057 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3595 return; 4058 return;
3596 } 4059 }
3597 4060
3598 if (rx_control & L2CAP_CTRL_POLL) 4061 if (__is_ctrl_poll(chan, rx_control)) {
3599 l2cap_send_srejtail(chan); 4062 l2cap_send_srejtail(chan);
3600 else 4063 } else {
3601 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY); 4064 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4065 l2cap_send_sframe(chan, rx_control);
4066 }
3602} 4067}
3603 4068
3604static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) 4069static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3605{ 4070{
3606 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); 4071 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3607 4072
3608 if (L2CAP_CTRL_FINAL & rx_control && 4073 if (__is_ctrl_final(chan, rx_control) &&
3609 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4074 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3610 __clear_monitor_timer(chan); 4075 __clear_monitor_timer(chan);
3611 if (chan->unacked_frames > 0) 4076 if (chan->unacked_frames > 0)
@@ -3613,20 +4078,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
3613 clear_bit(CONN_WAIT_F, &chan->conn_state); 4078 clear_bit(CONN_WAIT_F, &chan->conn_state);
3614 } 4079 }
3615 4080
3616 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 4081 switch (__get_ctrl_super(chan, rx_control)) {
3617 case L2CAP_SUPER_RCV_READY: 4082 case L2CAP_SUPER_RR:
3618 l2cap_data_channel_rrframe(chan, rx_control); 4083 l2cap_data_channel_rrframe(chan, rx_control);
3619 break; 4084 break;
3620 4085
3621 case L2CAP_SUPER_REJECT: 4086 case L2CAP_SUPER_REJ:
3622 l2cap_data_channel_rejframe(chan, rx_control); 4087 l2cap_data_channel_rejframe(chan, rx_control);
3623 break; 4088 break;
3624 4089
3625 case L2CAP_SUPER_SELECT_REJECT: 4090 case L2CAP_SUPER_SREJ:
3626 l2cap_data_channel_srejframe(chan, rx_control); 4091 l2cap_data_channel_srejframe(chan, rx_control);
3627 break; 4092 break;
3628 4093
3629 case L2CAP_SUPER_RCV_NOT_READY: 4094 case L2CAP_SUPER_RNR:
3630 l2cap_data_channel_rnrframe(chan, rx_control); 4095 l2cap_data_channel_rnrframe(chan, rx_control);
3631 break; 4096 break;
3632 } 4097 }
@@ -3638,12 +4103,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
3638static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 4103static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3639{ 4104{
3640 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 4105 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3641 u16 control; 4106 u32 control;
3642 u8 req_seq; 4107 u16 req_seq;
3643 int len, next_tx_seq_offset, req_seq_offset; 4108 int len, next_tx_seq_offset, req_seq_offset;
3644 4109
3645 control = get_unaligned_le16(skb->data); 4110 control = __get_control(chan, skb->data);
3646 skb_pull(skb, 2); 4111 skb_pull(skb, __ctrl_size(chan));
3647 len = skb->len; 4112 len = skb->len;
3648 4113
3649 /* 4114 /*
@@ -3654,26 +4119,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3654 if (l2cap_check_fcs(chan, skb)) 4119 if (l2cap_check_fcs(chan, skb))
3655 goto drop; 4120 goto drop;
3656 4121
3657 if (__is_sar_start(control) && __is_iframe(control)) 4122 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3658 len -= 2; 4123 len -= L2CAP_SDULEN_SIZE;
3659 4124
3660 if (chan->fcs == L2CAP_FCS_CRC16) 4125 if (chan->fcs == L2CAP_FCS_CRC16)
3661 len -= 2; 4126 len -= L2CAP_FCS_SIZE;
3662 4127
3663 if (len > chan->mps) { 4128 if (len > chan->mps) {
3664 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4129 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3665 goto drop; 4130 goto drop;
3666 } 4131 }
3667 4132
3668 req_seq = __get_reqseq(control); 4133 req_seq = __get_reqseq(chan, control);
3669 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3670 if (req_seq_offset < 0)
3671 req_seq_offset += 64;
3672 4134
3673 next_tx_seq_offset = 4135 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3674 (chan->next_tx_seq - chan->expected_ack_seq) % 64; 4136
3675 if (next_tx_seq_offset < 0) 4137 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3676 next_tx_seq_offset += 64; 4138 chan->expected_ack_seq);
3677 4139
3678 /* check for invalid req-seq */ 4140 /* check for invalid req-seq */
3679 if (req_seq_offset > next_tx_seq_offset) { 4141 if (req_seq_offset > next_tx_seq_offset) {
@@ -3681,7 +4143,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3681 goto drop; 4143 goto drop;
3682 } 4144 }
3683 4145
3684 if (__is_iframe(control)) { 4146 if (!__is_sframe(chan, control)) {
3685 if (len < 0) { 4147 if (len < 0) {
3686 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4148 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3687 goto drop; 4149 goto drop;
@@ -3709,8 +4171,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3709{ 4171{
3710 struct l2cap_chan *chan; 4172 struct l2cap_chan *chan;
3711 struct sock *sk = NULL; 4173 struct sock *sk = NULL;
3712 u16 control; 4174 u32 control;
3713 u8 tx_seq; 4175 u16 tx_seq;
3714 int len; 4176 int len;
3715 4177
3716 chan = l2cap_get_chan_by_scid(conn, cid); 4178 chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3751,23 +4213,23 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3751 goto done; 4213 goto done;
3752 4214
3753 case L2CAP_MODE_STREAMING: 4215 case L2CAP_MODE_STREAMING:
3754 control = get_unaligned_le16(skb->data); 4216 control = __get_control(chan, skb->data);
3755 skb_pull(skb, 2); 4217 skb_pull(skb, __ctrl_size(chan));
3756 len = skb->len; 4218 len = skb->len;
3757 4219
3758 if (l2cap_check_fcs(chan, skb)) 4220 if (l2cap_check_fcs(chan, skb))
3759 goto drop; 4221 goto drop;
3760 4222
3761 if (__is_sar_start(control)) 4223 if (__is_sar_start(chan, control))
3762 len -= 2; 4224 len -= L2CAP_SDULEN_SIZE;
3763 4225
3764 if (chan->fcs == L2CAP_FCS_CRC16) 4226 if (chan->fcs == L2CAP_FCS_CRC16)
3765 len -= 2; 4227 len -= L2CAP_FCS_SIZE;
3766 4228
3767 if (len > chan->mps || len < 0 || __is_sframe(control)) 4229 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3768 goto drop; 4230 goto drop;
3769 4231
3770 tx_seq = __get_txseq(control); 4232 tx_seq = __get_txseq(chan, control);
3771 4233
3772 if (chan->expected_tx_seq != tx_seq) { 4234 if (chan->expected_tx_seq != tx_seq) {
3773 /* Frame(s) missing - must discard partial SDU */ 4235 /* Frame(s) missing - must discard partial SDU */
@@ -3779,7 +4241,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3779 /* TODO: Notify userland of missing data */ 4241 /* TODO: Notify userland of missing data */
3780 } 4242 }
3781 4243
3782 chan->expected_tx_seq = (tx_seq + 1) % 64; 4244 chan->expected_tx_seq = __next_seq(chan, tx_seq);
3783 4245
3784 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) 4246 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3785 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4247 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3933,12 +4395,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3933 4395
3934 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 4396 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3935 lm1 |= HCI_LM_ACCEPT; 4397 lm1 |= HCI_LM_ACCEPT;
3936 if (c->role_switch) 4398 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3937 lm1 |= HCI_LM_MASTER; 4399 lm1 |= HCI_LM_MASTER;
3938 exact++; 4400 exact++;
3939 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 4401 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3940 lm2 |= HCI_LM_ACCEPT; 4402 lm2 |= HCI_LM_ACCEPT;
3941 if (c->role_switch) 4403 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3942 lm2 |= HCI_LM_MASTER; 4404 lm2 |= HCI_LM_MASTER;
3943 } 4405 }
3944 } 4406 }
@@ -3973,7 +4435,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
3973 BT_DBG("hcon %p", hcon); 4435 BT_DBG("hcon %p", hcon);
3974 4436
3975 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn) 4437 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3976 return 0x13; 4438 return HCI_ERROR_REMOTE_USER_TERM;
3977 4439
3978 return conn->disc_reason; 4440 return conn->disc_reason;
3979} 4441}
@@ -4306,3 +4768,6 @@ void l2cap_exit(void)
4306 4768
4307module_param(disable_ertm, bool, 0644); 4769module_param(disable_ertm, bool, 0644);
4308MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 4770MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4771
4772module_param(enable_hs, bool, 0644);
4773MODULE_PARM_DESC(enable_hs, "Enable High Speed");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e8292369cdcf..567b585d9805 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -333,7 +333,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
333 opts.mode = chan->mode; 333 opts.mode = chan->mode;
334 opts.fcs = chan->fcs; 334 opts.fcs = chan->fcs;
335 opts.max_tx = chan->max_tx; 335 opts.max_tx = chan->max_tx;
336 opts.txwin_size = (__u16)chan->tx_win; 336 opts.txwin_size = chan->tx_win;
337 337
338 len = min_t(unsigned int, len, sizeof(opts)); 338 len = min_t(unsigned int, len, sizeof(opts));
339 if (copy_to_user(optval, (char *) &opts, len)) 339 if (copy_to_user(optval, (char *) &opts, len))
@@ -358,10 +358,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
358 break; 358 break;
359 } 359 }
360 360
361 if (chan->role_switch) 361 if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
362 opt |= L2CAP_LM_MASTER; 362 opt |= L2CAP_LM_MASTER;
363 363
364 if (chan->force_reliable) 364 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
365 opt |= L2CAP_LM_RELIABLE; 365 opt |= L2CAP_LM_RELIABLE;
366 366
367 if (put_user(opt, (u32 __user *) optval)) 367 if (put_user(opt, (u32 __user *) optval))
@@ -448,7 +448,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
448 break; 448 break;
449 449
450 case BT_FLUSHABLE: 450 case BT_FLUSHABLE:
451 if (put_user(chan->flushable, (u32 __user *) optval)) 451 if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
452 (u32 __user *) optval))
452 err = -EFAULT; 453 err = -EFAULT;
453 454
454 break; 455 break;
@@ -460,7 +461,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
460 break; 461 break;
461 } 462 }
462 463
463 pwr.force_active = chan->force_active; 464 pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
464 465
465 len = min_t(unsigned int, len, sizeof(pwr)); 466 len = min_t(unsigned int, len, sizeof(pwr));
466 if (copy_to_user(optval, (char *) &pwr, len)) 467 if (copy_to_user(optval, (char *) &pwr, len))
@@ -468,6 +469,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
468 469
469 break; 470 break;
470 471
472 case BT_CHANNEL_POLICY:
473 if (!enable_hs) {
474 err = -ENOPROTOOPT;
475 break;
476 }
477
478 if (put_user(chan->chan_policy, (u32 __user *) optval))
479 err = -EFAULT;
480 break;
481
471 default: 482 default:
472 err = -ENOPROTOOPT; 483 err = -ENOPROTOOPT;
473 break; 484 break;
@@ -502,7 +513,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
502 opts.mode = chan->mode; 513 opts.mode = chan->mode;
503 opts.fcs = chan->fcs; 514 opts.fcs = chan->fcs;
504 opts.max_tx = chan->max_tx; 515 opts.max_tx = chan->max_tx;
505 opts.txwin_size = (__u16)chan->tx_win; 516 opts.txwin_size = chan->tx_win;
506 517
507 len = min_t(unsigned int, sizeof(opts), optlen); 518 len = min_t(unsigned int, sizeof(opts), optlen);
508 if (copy_from_user((char *) &opts, optval, len)) { 519 if (copy_from_user((char *) &opts, optval, len)) {
@@ -510,7 +521,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
510 break; 521 break;
511 } 522 }
512 523
513 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) { 524 if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
514 err = -EINVAL; 525 err = -EINVAL;
515 break; 526 break;
516 } 527 }
@@ -534,7 +545,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
534 chan->omtu = opts.omtu; 545 chan->omtu = opts.omtu;
535 chan->fcs = opts.fcs; 546 chan->fcs = opts.fcs;
536 chan->max_tx = opts.max_tx; 547 chan->max_tx = opts.max_tx;
537 chan->tx_win = (__u8)opts.txwin_size; 548 chan->tx_win = opts.txwin_size;
538 break; 549 break;
539 550
540 case L2CAP_LM: 551 case L2CAP_LM:
@@ -550,8 +561,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
550 if (opt & L2CAP_LM_SECURE) 561 if (opt & L2CAP_LM_SECURE)
551 chan->sec_level = BT_SECURITY_HIGH; 562 chan->sec_level = BT_SECURITY_HIGH;
552 563
553 chan->role_switch = (opt & L2CAP_LM_MASTER); 564 if (opt & L2CAP_LM_MASTER)
554 chan->force_reliable = (opt & L2CAP_LM_RELIABLE); 565 set_bit(FLAG_ROLE_SWITCH, &chan->flags);
566 else
567 clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
568
569 if (opt & L2CAP_LM_RELIABLE)
570 set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
571 else
572 clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
555 break; 573 break;
556 574
557 default: 575 default:
@@ -657,7 +675,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
657 } 675 }
658 } 676 }
659 677
660 chan->flushable = opt; 678 if (opt)
679 set_bit(FLAG_FLUSHABLE, &chan->flags);
680 else
681 clear_bit(FLAG_FLUSHABLE, &chan->flags);
661 break; 682 break;
662 683
663 case BT_POWER: 684 case BT_POWER:
@@ -674,7 +695,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
674 err = -EFAULT; 695 err = -EFAULT;
675 break; 696 break;
676 } 697 }
677 chan->force_active = pwr.force_active; 698
699 if (pwr.force_active)
700 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
701 else
702 clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
703 break;
704
705 case BT_CHANNEL_POLICY:
706 if (!enable_hs) {
707 err = -ENOPROTOOPT;
708 break;
709 }
710
711 if (get_user(opt, (u32 __user *) optval)) {
712 err = -EFAULT;
713 break;
714 }
715
716 if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
717 err = -EINVAL;
718 break;
719 }
720
721 if (chan->mode != L2CAP_MODE_ERTM &&
722 chan->mode != L2CAP_MODE_STREAMING) {
723 err = -EOPNOTSUPP;
724 break;
725 }
726
727 chan->chan_policy = (u8) opt;
678 break; 728 break;
679 729
680 default: 730 default:
@@ -708,7 +758,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
708 return -ENOTCONN; 758 return -ENOTCONN;
709 } 759 }
710 760
711 err = l2cap_chan_send(chan, msg, len); 761 err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
712 762
713 release_sock(sk); 763 release_sock(sk);
714 return err; 764 return err;
@@ -930,11 +980,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
930 chan->fcs = pchan->fcs; 980 chan->fcs = pchan->fcs;
931 chan->max_tx = pchan->max_tx; 981 chan->max_tx = pchan->max_tx;
932 chan->tx_win = pchan->tx_win; 982 chan->tx_win = pchan->tx_win;
983 chan->tx_win_max = pchan->tx_win_max;
933 chan->sec_level = pchan->sec_level; 984 chan->sec_level = pchan->sec_level;
934 chan->role_switch = pchan->role_switch; 985 chan->flags = pchan->flags;
935 chan->force_reliable = pchan->force_reliable;
936 chan->flushable = pchan->flushable;
937 chan->force_active = pchan->force_active;
938 986
939 security_sk_clone(parent, sk); 987 security_sk_clone(parent, sk);
940 } else { 988 } else {
@@ -963,12 +1011,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
963 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 1011 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
964 chan->fcs = L2CAP_FCS_CRC16; 1012 chan->fcs = L2CAP_FCS_CRC16;
965 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; 1013 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
1014 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
966 chan->sec_level = BT_SECURITY_LOW; 1015 chan->sec_level = BT_SECURITY_LOW;
967 chan->role_switch = 0; 1016 chan->flags = 0;
968 chan->force_reliable = 0; 1017 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
969 chan->flushable = BT_FLUSHABLE_OFF;
970 chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
971
972 } 1018 }
973 1019
974 /* Default config options */ 1020 /* Default config options */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 5caff4d47596..a6720c6a4d2c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -32,22 +32,23 @@
32#define MGMT_VERSION 0 32#define MGMT_VERSION 0
33#define MGMT_REVISION 1 33#define MGMT_REVISION 1
34 34
35#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
36
35struct pending_cmd { 37struct pending_cmd {
36 struct list_head list; 38 struct list_head list;
37 __u16 opcode; 39 u16 opcode;
38 int index; 40 int index;
39 void *param; 41 void *param;
40 struct sock *sk; 42 struct sock *sk;
41 void *user_data; 43 void *user_data;
42}; 44};
43 45
44static LIST_HEAD(cmd_list);
45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
47{ 47{
48 struct sk_buff *skb; 48 struct sk_buff *skb;
49 struct mgmt_hdr *hdr; 49 struct mgmt_hdr *hdr;
50 struct mgmt_ev_cmd_status *ev; 50 struct mgmt_ev_cmd_status *ev;
51 int err;
51 52
52 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 53 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
53 54
@@ -65,10 +66,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
65 ev->status = status; 66 ev->status = status;
66 put_unaligned_le16(cmd, &ev->opcode); 67 put_unaligned_le16(cmd, &ev->opcode);
67 68
68 if (sock_queue_rcv_skb(sk, skb) < 0) 69 err = sock_queue_rcv_skb(sk, skb);
70 if (err < 0)
69 kfree_skb(skb); 71 kfree_skb(skb);
70 72
71 return 0; 73 return err;
72} 74}
73 75
74static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, 76static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -77,6 +79,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
77 struct sk_buff *skb; 79 struct sk_buff *skb;
78 struct mgmt_hdr *hdr; 80 struct mgmt_hdr *hdr;
79 struct mgmt_ev_cmd_complete *ev; 81 struct mgmt_ev_cmd_complete *ev;
82 int err;
80 83
81 BT_DBG("sock %p", sk); 84 BT_DBG("sock %p", sk);
82 85
@@ -96,10 +99,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
96 if (rp) 99 if (rp)
97 memcpy(ev->data, rp, rp_len); 100 memcpy(ev->data, rp, rp_len);
98 101
99 if (sock_queue_rcv_skb(sk, skb) < 0) 102 err = sock_queue_rcv_skb(sk, skb);
103 if (err < 0)
100 kfree_skb(skb); 104 kfree_skb(skb);
101 105
 102 return 0; 106 return err;
103} 107}
104 108
105static int read_version(struct sock *sk) 109static int read_version(struct sock *sk)
@@ -119,6 +123,7 @@ static int read_index_list(struct sock *sk)
119{ 123{
120 struct mgmt_rp_read_index_list *rp; 124 struct mgmt_rp_read_index_list *rp;
121 struct list_head *p; 125 struct list_head *p;
126 struct hci_dev *d;
122 size_t rp_len; 127 size_t rp_len;
123 u16 count; 128 u16 count;
124 int i, err; 129 int i, err;
@@ -142,10 +147,9 @@ static int read_index_list(struct sock *sk)
142 put_unaligned_le16(count, &rp->num_controllers); 147 put_unaligned_le16(count, &rp->num_controllers);
143 148
144 i = 0; 149 i = 0;
145 list_for_each(p, &hci_dev_list) { 150 list_for_each_entry(d, &hci_dev_list, list) {
146 struct hci_dev *d = list_entry(p, struct hci_dev, list); 151 if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
147 152 cancel_delayed_work(&d->power_off);
148 hci_del_off_timer(d);
149 153
150 if (test_bit(HCI_SETUP, &d->flags)) 154 if (test_bit(HCI_SETUP, &d->flags))
151 continue; 155 continue;
@@ -175,7 +179,8 @@ static int read_controller_info(struct sock *sk, u16 index)
175 if (!hdev) 179 if (!hdev)
176 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); 180 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
177 181
178 hci_del_off_timer(hdev); 182 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
183 cancel_delayed_work_sync(&hdev->power_off);
179 184
180 hci_dev_lock_bh(hdev); 185 hci_dev_lock_bh(hdev);
181 186
@@ -220,7 +225,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
220} 225}
221 226
222static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 227static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
223 u16 index, void *data, u16 len) 228 struct hci_dev *hdev,
229 void *data, u16 len)
224{ 230{
225 struct pending_cmd *cmd; 231 struct pending_cmd *cmd;
226 232
@@ -229,7 +235,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
229 return NULL; 235 return NULL;
230 236
231 cmd->opcode = opcode; 237 cmd->opcode = opcode;
232 cmd->index = index; 238 cmd->index = hdev->id;
233 239
234 cmd->param = kmalloc(len, GFP_ATOMIC); 240 cmd->param = kmalloc(len, GFP_ATOMIC);
235 if (!cmd->param) { 241 if (!cmd->param) {
@@ -243,48 +249,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
243 cmd->sk = sk; 249 cmd->sk = sk;
244 sock_hold(sk); 250 sock_hold(sk);
245 251
246 list_add(&cmd->list, &cmd_list); 252 list_add(&cmd->list, &hdev->mgmt_pending);
247 253
248 return cmd; 254 return cmd;
249} 255}
250 256
251static void mgmt_pending_foreach(u16 opcode, int index, 257static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
252 void (*cb)(struct pending_cmd *cmd, void *data), 258 void (*cb)(struct pending_cmd *cmd, void *data),
253 void *data) 259 void *data)
254{ 260{
255 struct list_head *p, *n; 261 struct list_head *p, *n;
256 262
257 list_for_each_safe(p, n, &cmd_list) { 263 list_for_each_safe(p, n, &hdev->mgmt_pending) {
258 struct pending_cmd *cmd; 264 struct pending_cmd *cmd;
259 265
260 cmd = list_entry(p, struct pending_cmd, list); 266 cmd = list_entry(p, struct pending_cmd, list);
261 267
262 if (cmd->opcode != opcode) 268 if (opcode > 0 && cmd->opcode != opcode)
263 continue;
264
265 if (index >= 0 && cmd->index != index)
266 continue; 269 continue;
267 270
268 cb(cmd, data); 271 cb(cmd, data);
269 } 272 }
270} 273}
271 274
272static struct pending_cmd *mgmt_pending_find(u16 opcode, int index) 275static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
273{ 276{
274 struct list_head *p; 277 struct pending_cmd *cmd;
275
276 list_for_each(p, &cmd_list) {
277 struct pending_cmd *cmd;
278
279 cmd = list_entry(p, struct pending_cmd, list);
280
281 if (cmd->opcode != opcode)
282 continue;
283
284 if (index >= 0 && cmd->index != index)
285 continue;
286 278
287 return cmd; 279 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
280 if (cmd->opcode == opcode)
281 return cmd;
288 } 282 }
289 283
290 return NULL; 284 return NULL;
@@ -322,12 +316,12 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
322 goto failed; 316 goto failed;
323 } 317 }
324 318
325 if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { 319 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
326 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); 320 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
327 goto failed; 321 goto failed;
328 } 322 }
329 323
330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); 324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
331 if (!cmd) { 325 if (!cmd) {
332 err = -ENOMEM; 326 err = -ENOMEM;
333 goto failed; 327 goto failed;
@@ -336,7 +330,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
336 if (cp->val) 330 if (cp->val)
337 queue_work(hdev->workqueue, &hdev->power_on); 331 queue_work(hdev->workqueue, &hdev->power_on);
338 else 332 else
339 queue_work(hdev->workqueue, &hdev->power_off); 333 queue_work(hdev->workqueue, &hdev->power_off.work);
340 334
341 err = 0; 335 err = 0;
342 336
@@ -349,7 +343,7 @@ failed:
349static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, 343static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
350 u16 len) 344 u16 len)
351{ 345{
352 struct mgmt_mode *cp; 346 struct mgmt_cp_set_discoverable *cp;
353 struct hci_dev *hdev; 347 struct hci_dev *hdev;
354 struct pending_cmd *cmd; 348 struct pending_cmd *cmd;
355 u8 scan; 349 u8 scan;
@@ -373,8 +367,8 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
373 goto failed; 367 goto failed;
374 } 368 }
375 369
376 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || 370 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
377 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { 371 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
378 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); 372 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
379 goto failed; 373 goto failed;
380 } 374 }
@@ -385,7 +379,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
385 goto failed; 379 goto failed;
386 } 380 }
387 381
388 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); 382 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
389 if (!cmd) { 383 if (!cmd) {
390 err = -ENOMEM; 384 err = -ENOMEM;
391 goto failed; 385 goto failed;
@@ -395,11 +389,16 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
395 389
396 if (cp->val) 390 if (cp->val)
397 scan |= SCAN_INQUIRY; 391 scan |= SCAN_INQUIRY;
392 else
393 cancel_delayed_work(&hdev->discov_off);
398 394
399 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 395 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
400 if (err < 0) 396 if (err < 0)
401 mgmt_pending_remove(cmd); 397 mgmt_pending_remove(cmd);
402 398
399 if (cp->val)
400 hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
401
403failed: 402failed:
404 hci_dev_unlock_bh(hdev); 403 hci_dev_unlock_bh(hdev);
405 hci_dev_put(hdev); 404 hci_dev_put(hdev);
@@ -434,8 +433,8 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
434 goto failed; 433 goto failed;
435 } 434 }
436 435
437 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || 436 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
438 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { 437 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
439 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); 438 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
440 goto failed; 439 goto failed;
441 } 440 }
@@ -445,7 +444,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
445 goto failed; 444 goto failed;
446 } 445 }
447 446
448 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); 447 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
449 if (!cmd) { 448 if (!cmd) {
450 err = -ENOMEM; 449 err = -ENOMEM;
451 goto failed; 450 goto failed;
@@ -467,8 +466,8 @@ failed:
467 return err; 466 return err;
468} 467}
469 468
470static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, 469static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
471 struct sock *skip_sk) 470 u16 data_len, struct sock *skip_sk)
472{ 471{
473 struct sk_buff *skb; 472 struct sk_buff *skb;
474 struct mgmt_hdr *hdr; 473 struct mgmt_hdr *hdr;
@@ -481,7 +480,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
481 480
482 hdr = (void *) skb_put(skb, sizeof(*hdr)); 481 hdr = (void *) skb_put(skb, sizeof(*hdr));
483 hdr->opcode = cpu_to_le16(event); 482 hdr->opcode = cpu_to_le16(event);
484 hdr->index = cpu_to_le16(index); 483 if (hdev)
484 hdr->index = cpu_to_le16(hdev->id);
485 else
486 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
485 hdr->len = cpu_to_le16(data_len); 487 hdr->len = cpu_to_le16(data_len);
486 488
487 if (data) 489 if (data)
@@ -533,7 +535,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
533 535
534 ev.val = cp->val; 536 ev.val = cp->val;
535 537
536 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 538 err = mgmt_event(MGMT_EV_PAIRABLE, hdev, &ev, sizeof(ev), sk);
537 539
538failed: 540failed:
539 hci_dev_unlock_bh(hdev); 541 hci_dev_unlock_bh(hdev);
@@ -586,7 +588,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
586 u16 eir_len = 0; 588 u16 eir_len = 0;
587 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; 589 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
588 int i, truncated = 0; 590 int i, truncated = 0;
589 struct list_head *p; 591 struct bt_uuid *uuid;
590 size_t name_len; 592 size_t name_len;
591 593
592 name_len = strlen(hdev->dev_name); 594 name_len = strlen(hdev->dev_name);
@@ -611,8 +613,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
611 memset(uuid16_list, 0, sizeof(uuid16_list)); 613 memset(uuid16_list, 0, sizeof(uuid16_list));
612 614
613 /* Group all UUID16 types */ 615 /* Group all UUID16 types */
614 list_for_each(p, &hdev->uuids) { 616 list_for_each_entry(uuid, &hdev->uuids, list) {
615 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
616 u16 uuid16; 617 u16 uuid16;
617 618
618 uuid16 = get_uuid16(uuid->uuid); 619 uuid16 = get_uuid16(uuid->uuid);
@@ -688,14 +689,11 @@ static int update_eir(struct hci_dev *hdev)
688 689
689static u8 get_service_classes(struct hci_dev *hdev) 690static u8 get_service_classes(struct hci_dev *hdev)
690{ 691{
691 struct list_head *p; 692 struct bt_uuid *uuid;
692 u8 val = 0; 693 u8 val = 0;
693 694
694 list_for_each(p, &hdev->uuids) { 695 list_for_each_entry(uuid, &hdev->uuids, list)
695 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
696
697 val |= uuid->svc_hint; 696 val |= uuid->svc_hint;
698 }
699 697
700 return val; 698 return val;
701} 699}
@@ -894,6 +892,9 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
894 if (err == 0) 892 if (err == 0)
895 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 893 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
896 0); 894 0);
895 else
896 cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, -err);
897
897 898
898 hci_dev_unlock_bh(hdev); 899 hci_dev_unlock_bh(hdev);
899 hci_dev_put(hdev); 900 hci_dev_put(hdev);
@@ -901,30 +902,32 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
901 return err; 902 return err;
902} 903}
903 904
904static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) 905static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
906 u16 len)
905{ 907{
906 struct hci_dev *hdev; 908 struct hci_dev *hdev;
907 struct mgmt_cp_load_keys *cp; 909 struct mgmt_cp_load_link_keys *cp;
908 u16 key_count, expected_len; 910 u16 key_count, expected_len;
909 int i; 911 int i;
910 912
911 cp = (void *) data; 913 cp = (void *) data;
912 914
913 if (len < sizeof(*cp)) 915 if (len < sizeof(*cp))
914 return -EINVAL; 916 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
915 917
916 key_count = get_unaligned_le16(&cp->key_count); 918 key_count = get_unaligned_le16(&cp->key_count);
917 919
918 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); 920 expected_len = sizeof(*cp) + key_count *
921 sizeof(struct mgmt_link_key_info);
919 if (expected_len != len) { 922 if (expected_len != len) {
920 BT_ERR("load_keys: expected %u bytes, got %u bytes", 923 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
921 len, expected_len); 924 len, expected_len);
922 return -EINVAL; 925 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
923 } 926 }
924 927
925 hdev = hci_dev_get(index); 928 hdev = hci_dev_get(index);
926 if (!hdev) 929 if (!hdev)
927 return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); 930 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, ENODEV);
928 931
929 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 932 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
930 key_count); 933 key_count);
@@ -941,7 +944,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
941 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 944 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
942 945
943 for (i = 0; i < key_count; i++) { 946 for (i = 0; i < key_count; i++) {
944 struct mgmt_key_info *key = &cp->keys[i]; 947 struct mgmt_link_key_info *key = &cp->keys[i];
945 948
946 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 949 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
947 key->pin_len); 950 key->pin_len);
@@ -953,27 +956,28 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
953 return 0; 956 return 0;
954} 957}
955 958
956static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 959static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
960 u16 len)
957{ 961{
958 struct hci_dev *hdev; 962 struct hci_dev *hdev;
959 struct mgmt_cp_remove_key *cp; 963 struct mgmt_cp_remove_keys *cp;
960 struct hci_conn *conn; 964 struct hci_conn *conn;
961 int err; 965 int err;
962 966
963 cp = (void *) data; 967 cp = (void *) data;
964 968
965 if (len != sizeof(*cp)) 969 if (len != sizeof(*cp))
966 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); 970 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, EINVAL);
967 971
968 hdev = hci_dev_get(index); 972 hdev = hci_dev_get(index);
969 if (!hdev) 973 if (!hdev)
970 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); 974 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, ENODEV);
971 975
972 hci_dev_lock_bh(hdev); 976 hci_dev_lock_bh(hdev);
973 977
974 err = hci_remove_link_key(hdev, &cp->bdaddr); 978 err = hci_remove_link_key(hdev, &cp->bdaddr);
975 if (err < 0) { 979 if (err < 0) {
976 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); 980 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, -err);
977 goto unlock; 981 goto unlock;
978 } 982 }
979 983
@@ -1025,7 +1029,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1025 goto failed; 1029 goto failed;
1026 } 1030 }
1027 1031
1028 if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { 1032 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1029 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); 1033 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
1030 goto failed; 1034 goto failed;
1031 } 1035 }
@@ -1039,7 +1043,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1039 goto failed; 1043 goto failed;
1040 } 1044 }
1041 1045
1042 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); 1046 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1043 if (!cmd) { 1047 if (!cmd) {
1044 err = -ENOMEM; 1048 err = -ENOMEM;
1045 goto failed; 1049 goto failed;
@@ -1059,10 +1063,23 @@ failed:
1059 return err; 1063 return err;
1060} 1064}
1061 1065
1066static u8 link_to_mgmt(u8 link_type)
1067{
1068 switch (link_type) {
1069 case LE_LINK:
1070 return MGMT_ADDR_LE;
1071 case ACL_LINK:
1072 return MGMT_ADDR_BREDR;
1073 default:
1074 return MGMT_ADDR_INVALID;
1075 }
1076}
1077
1062static int get_connections(struct sock *sk, u16 index) 1078static int get_connections(struct sock *sk, u16 index)
1063{ 1079{
1064 struct mgmt_rp_get_connections *rp; 1080 struct mgmt_rp_get_connections *rp;
1065 struct hci_dev *hdev; 1081 struct hci_dev *hdev;
1082 struct hci_conn *c;
1066 struct list_head *p; 1083 struct list_head *p;
1067 size_t rp_len; 1084 size_t rp_len;
1068 u16 count; 1085 u16 count;
@@ -1081,7 +1098,7 @@ static int get_connections(struct sock *sk, u16 index)
1081 count++; 1098 count++;
1082 } 1099 }
1083 1100
1084 rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t)); 1101 rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
1085 rp = kmalloc(rp_len, GFP_ATOMIC); 1102 rp = kmalloc(rp_len, GFP_ATOMIC);
1086 if (!rp) { 1103 if (!rp) {
1087 err = -ENOMEM; 1104 err = -ENOMEM;
@@ -1091,12 +1108,17 @@ static int get_connections(struct sock *sk, u16 index)
1091 put_unaligned_le16(count, &rp->conn_count); 1108 put_unaligned_le16(count, &rp->conn_count);
1092 1109
1093 i = 0; 1110 i = 0;
1094 list_for_each(p, &hdev->conn_hash.list) { 1111 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1095 struct hci_conn *c = list_entry(p, struct hci_conn, list); 1112 bacpy(&rp->addr[i].bdaddr, &c->dst);
1096 1113 rp->addr[i].type = link_to_mgmt(c->type);
1097 bacpy(&rp->conn[i++], &c->dst); 1114 if (rp->addr[i].type == MGMT_ADDR_INVALID)
1115 continue;
1116 i++;
1098 } 1117 }
1099 1118
1119 /* Recalculate length in case of filtered SCO connections, etc */
1120 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1121
1100 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 1122 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
1101 1123
1102unlock: 1124unlock:
@@ -1112,7 +1134,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index,
1112 struct pending_cmd *cmd; 1134 struct pending_cmd *cmd;
1113 int err; 1135 int err;
1114 1136
1115 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp, 1137 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1116 sizeof(*cp)); 1138 sizeof(*cp));
1117 if (!cmd) 1139 if (!cmd)
1118 return -ENOMEM; 1140 return -ENOMEM;
@@ -1173,7 +1195,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1173 goto failed; 1195 goto failed;
1174 } 1196 }
1175 1197
1176 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); 1198 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1177 if (!cmd) { 1199 if (!cmd) {
1178 err = -ENOMEM; 1200 err = -ENOMEM;
1179 goto failed; 1201 goto failed;
@@ -1264,19 +1286,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1264static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1286static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1265{ 1287{
1266 struct hci_dev *hdev = conn->hdev; 1288 struct hci_dev *hdev = conn->hdev;
1267 struct list_head *p; 1289 struct pending_cmd *cmd;
1268
1269 list_for_each(p, &cmd_list) {
1270 struct pending_cmd *cmd;
1271
1272 cmd = list_entry(p, struct pending_cmd, list);
1273 1290
1291 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1274 if (cmd->opcode != MGMT_OP_PAIR_DEVICE) 1292 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1275 continue; 1293 continue;
1276 1294
1277 if (cmd->index != hdev->id)
1278 continue;
1279
1280 if (cmd->user_data != conn) 1295 if (cmd->user_data != conn)
1281 continue; 1296 continue;
1282 1297
@@ -1309,16 +1324,19 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
1309static void pairing_complete_cb(struct hci_conn *conn, u8 status) 1324static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1310{ 1325{
1311 struct pending_cmd *cmd; 1326 struct pending_cmd *cmd;
1327 struct hci_dev *hdev = conn->hdev;
1312 1328
1313 BT_DBG("status %u", status); 1329 BT_DBG("status %u", status);
1314 1330
1331 hci_dev_lock_bh(hdev);
1332
1315 cmd = find_pairing(conn); 1333 cmd = find_pairing(conn);
1316 if (!cmd) { 1334 if (!cmd)
1317 BT_DBG("Unable to find a pending command"); 1335 BT_DBG("Unable to find a pending command");
1318 return; 1336 else
1319 } 1337 pairing_complete(cmd, status);
1320 1338
1321 pairing_complete(cmd, status); 1339 hci_dev_unlock_bh(hdev);
1322} 1340}
1323 1341
1324static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) 1342static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -1369,7 +1387,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1369 goto unlock; 1387 goto unlock;
1370 } 1388 }
1371 1389
1372 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len); 1390 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
1373 if (!cmd) { 1391 if (!cmd) {
1374 err = -ENOMEM; 1392 err = -ENOMEM;
1375 hci_conn_put(conn); 1393 hci_conn_put(conn);
@@ -1431,7 +1449,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1431 goto failed; 1449 goto failed;
1432 } 1450 }
1433 1451
1434 cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); 1452 cmd = mgmt_pending_add(sk, mgmt_op, hdev, data, len);
1435 if (!cmd) { 1453 if (!cmd) {
1436 err = -ENOMEM; 1454 err = -ENOMEM;
1437 goto failed; 1455 goto failed;
@@ -1468,7 +1486,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1468 1486
1469 hci_dev_lock_bh(hdev); 1487 hci_dev_lock_bh(hdev);
1470 1488
1471 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); 1489 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
1472 if (!cmd) { 1490 if (!cmd) {
1473 err = -ENOMEM; 1491 err = -ENOMEM;
1474 goto failed; 1492 goto failed;
@@ -1514,12 +1532,12 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1514 goto unlock; 1532 goto unlock;
1515 } 1533 }
1516 1534
1517 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) { 1535 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
1518 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY); 1536 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
1519 goto unlock; 1537 goto unlock;
1520 } 1538 }
1521 1539
1522 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0); 1540 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
1523 if (!cmd) { 1541 if (!cmd) {
1524 err = -ENOMEM; 1542 err = -ENOMEM;
1525 goto unlock; 1543 goto unlock;
@@ -1606,8 +1624,6 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1606 1624
1607static int start_discovery(struct sock *sk, u16 index) 1625static int start_discovery(struct sock *sk, u16 index)
1608{ 1626{
1609 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1610 struct hci_cp_inquiry cp;
1611 struct pending_cmd *cmd; 1627 struct pending_cmd *cmd;
1612 struct hci_dev *hdev; 1628 struct hci_dev *hdev;
1613 int err; 1629 int err;
@@ -1620,18 +1636,18 @@ static int start_discovery(struct sock *sk, u16 index)
1620 1636
1621 hci_dev_lock_bh(hdev); 1637 hci_dev_lock_bh(hdev);
1622 1638
1623 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0); 1639 if (!test_bit(HCI_UP, &hdev->flags)) {
1640 err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENETDOWN);
1641 goto failed;
1642 }
1643
1644 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
1624 if (!cmd) { 1645 if (!cmd) {
1625 err = -ENOMEM; 1646 err = -ENOMEM;
1626 goto failed; 1647 goto failed;
1627 } 1648 }
1628 1649
1629 memset(&cp, 0, sizeof(cp)); 1650 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
1630 memcpy(&cp.lap, lap, 3);
1631 cp.length = 0x08;
1632 cp.num_rsp = 0x00;
1633
1634 err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1635 if (err < 0) 1651 if (err < 0)
1636 mgmt_pending_remove(cmd); 1652 mgmt_pending_remove(cmd);
1637 1653
@@ -1656,13 +1672,13 @@ static int stop_discovery(struct sock *sk, u16 index)
1656 1672
1657 hci_dev_lock_bh(hdev); 1673 hci_dev_lock_bh(hdev);
1658 1674
1659 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0); 1675 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
1660 if (!cmd) { 1676 if (!cmd) {
1661 err = -ENOMEM; 1677 err = -ENOMEM;
1662 goto failed; 1678 goto failed;
1663 } 1679 }
1664 1680
1665 err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 1681 err = hci_cancel_inquiry(hdev);
1666 if (err < 0) 1682 if (err < 0)
1667 mgmt_pending_remove(cmd); 1683 mgmt_pending_remove(cmd);
1668 1684
@@ -1677,7 +1693,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1677 u16 len) 1693 u16 len)
1678{ 1694{
1679 struct hci_dev *hdev; 1695 struct hci_dev *hdev;
1680 struct pending_cmd *cmd;
1681 struct mgmt_cp_block_device *cp = (void *) data; 1696 struct mgmt_cp_block_device *cp = (void *) data;
1682 int err; 1697 int err;
1683 1698
@@ -1694,23 +1709,13 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1694 1709
1695 hci_dev_lock_bh(hdev); 1710 hci_dev_lock_bh(hdev);
1696 1711
1697 cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
1698 if (!cmd) {
1699 err = -ENOMEM;
1700 goto failed;
1701 }
1702
1703 err = hci_blacklist_add(hdev, &cp->bdaddr); 1712 err = hci_blacklist_add(hdev, &cp->bdaddr);
1704
1705 if (err < 0) 1713 if (err < 0)
1706 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err); 1714 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
1707 else 1715 else
1708 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 1716 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1709 NULL, 0); 1717 NULL, 0);
1710 1718
1711 mgmt_pending_remove(cmd);
1712
1713failed:
1714 hci_dev_unlock_bh(hdev); 1719 hci_dev_unlock_bh(hdev);
1715 hci_dev_put(hdev); 1720 hci_dev_put(hdev);
1716 1721
@@ -1721,7 +1726,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len) 1726 u16 len)
1722{ 1727{
1723 struct hci_dev *hdev; 1728 struct hci_dev *hdev;
1724 struct pending_cmd *cmd;
1725 struct mgmt_cp_unblock_device *cp = (void *) data; 1729 struct mgmt_cp_unblock_device *cp = (void *) data;
1726 int err; 1730 int err;
1727 1731
@@ -1738,12 +1742,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1738 1742
1739 hci_dev_lock_bh(hdev); 1743 hci_dev_lock_bh(hdev);
1740 1744
1741 cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
1742 if (!cmd) {
1743 err = -ENOMEM;
1744 goto failed;
1745 }
1746
1747 err = hci_blacklist_del(hdev, &cp->bdaddr); 1745 err = hci_blacklist_del(hdev, &cp->bdaddr);
1748 1746
1749 if (err < 0) 1747 if (err < 0)
@@ -1752,9 +1750,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1752 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, 1750 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1753 NULL, 0); 1751 NULL, 0);
1754 1752
1755 mgmt_pending_remove(cmd);
1756
1757failed:
1758 hci_dev_unlock_bh(hdev); 1753 hci_dev_unlock_bh(hdev);
1759 hci_dev_put(hdev); 1754 hci_dev_put(hdev);
1760 1755
@@ -1882,11 +1877,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1882 case MGMT_OP_SET_SERVICE_CACHE: 1877 case MGMT_OP_SET_SERVICE_CACHE:
1883 err = set_service_cache(sk, index, buf + sizeof(*hdr), len); 1878 err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
1884 break; 1879 break;
1885 case MGMT_OP_LOAD_KEYS: 1880 case MGMT_OP_LOAD_LINK_KEYS:
1886 err = load_keys(sk, index, buf + sizeof(*hdr), len); 1881 err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
1887 break; 1882 break;
1888 case MGMT_OP_REMOVE_KEY: 1883 case MGMT_OP_REMOVE_KEYS:
1889 err = remove_key(sk, index, buf + sizeof(*hdr), len); 1884 err = remove_keys(sk, index, buf + sizeof(*hdr), len);
1890 break; 1885 break;
1891 case MGMT_OP_DISCONNECT: 1886 case MGMT_OP_DISCONNECT:
1892 err = disconnect(sk, index, buf + sizeof(*hdr), len); 1887 err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1957,14 +1952,26 @@ done:
1957 return err; 1952 return err;
1958} 1953}
1959 1954
1960int mgmt_index_added(u16 index) 1955static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1961{ 1956{
1962 return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); 1957 u8 *status = data;
1958
1959 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1960 mgmt_pending_remove(cmd);
1963} 1961}
1964 1962
1965int mgmt_index_removed(u16 index) 1963int mgmt_index_added(struct hci_dev *hdev)
1966{ 1964{
1967 return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); 1965 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
1966}
1967
1968int mgmt_index_removed(struct hci_dev *hdev)
1969{
1970 u8 status = ENODEV;
1971
1972 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
1973
1974 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
1968} 1975}
1969 1976
1970struct cmd_lookup { 1977struct cmd_lookup {
@@ -1992,17 +1999,22 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
1992 mgmt_pending_free(cmd); 1999 mgmt_pending_free(cmd);
1993} 2000}
1994 2001
1995int mgmt_powered(u16 index, u8 powered) 2002int mgmt_powered(struct hci_dev *hdev, u8 powered)
1996{ 2003{
1997 struct mgmt_mode ev; 2004 struct mgmt_mode ev;
1998 struct cmd_lookup match = { powered, NULL }; 2005 struct cmd_lookup match = { powered, NULL };
1999 int ret; 2006 int ret;
2000 2007
2001 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); 2008 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, mode_rsp, &match);
2009
2010 if (!powered) {
2011 u8 status = ENETDOWN;
2012 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2013 }
2002 2014
2003 ev.val = powered; 2015 ev.val = powered;
2004 2016
2005 ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); 2017 ret = mgmt_event(MGMT_EV_POWERED, hdev, &ev, sizeof(ev), match.sk);
2006 2018
2007 if (match.sk) 2019 if (match.sk)
2008 sock_put(match.sk); 2020 sock_put(match.sk);
@@ -2010,17 +2022,17 @@ int mgmt_powered(u16 index, u8 powered)
2010 return ret; 2022 return ret;
2011} 2023}
2012 2024
2013int mgmt_discoverable(u16 index, u8 discoverable) 2025int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2014{ 2026{
2015 struct mgmt_mode ev; 2027 struct mgmt_mode ev;
2016 struct cmd_lookup match = { discoverable, NULL }; 2028 struct cmd_lookup match = { discoverable, NULL };
2017 int ret; 2029 int ret;
2018 2030
2019 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); 2031 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, mode_rsp, &match);
2020 2032
2021 ev.val = discoverable; 2033 ev.val = discoverable;
2022 2034
2023 ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), 2035 ret = mgmt_event(MGMT_EV_DISCOVERABLE, hdev, &ev, sizeof(ev),
2024 match.sk); 2036 match.sk);
2025 2037
2026 if (match.sk) 2038 if (match.sk)
@@ -2029,17 +2041,17 @@ int mgmt_discoverable(u16 index, u8 discoverable)
2029 return ret; 2041 return ret;
2030} 2042}
2031 2043
2032int mgmt_connectable(u16 index, u8 connectable) 2044int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2033{ 2045{
2034 struct mgmt_mode ev; 2046 struct mgmt_mode ev;
2035 struct cmd_lookup match = { connectable, NULL }; 2047 struct cmd_lookup match = { connectable, NULL };
2036 int ret; 2048 int ret;
2037 2049
2038 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); 2050 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, mode_rsp, &match);
2039 2051
2040 ev.val = connectable; 2052 ev.val = connectable;
2041 2053
2042 ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); 2054 ret = mgmt_event(MGMT_EV_CONNECTABLE, hdev, &ev, sizeof(ev), match.sk);
2043 2055
2044 if (match.sk) 2056 if (match.sk)
2045 sock_put(match.sk); 2057 sock_put(match.sk);
@@ -2047,9 +2059,23 @@ int mgmt_connectable(u16 index, u8 connectable)
2047 return ret; 2059 return ret;
2048} 2060}
2049 2061
2050int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 2062int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2063{
2064 if (scan & SCAN_PAGE)
2065 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
2066 cmd_status_rsp, &status);
2067
2068 if (scan & SCAN_INQUIRY)
2069 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
2070 cmd_status_rsp, &status);
2071
2072 return 0;
2073}
2074
2075int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2076 u8 persistent)
2051{ 2077{
2052 struct mgmt_ev_new_key ev; 2078 struct mgmt_ev_new_link_key ev;
2053 2079
2054 memset(&ev, 0, sizeof(ev)); 2080 memset(&ev, 0, sizeof(ev));
2055 2081
@@ -2059,17 +2085,17 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
2059 memcpy(ev.key.val, key->val, 16); 2085 memcpy(ev.key.val, key->val, 16);
2060 ev.key.pin_len = key->pin_len; 2086 ev.key.pin_len = key->pin_len;
2061 2087
2062 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 2088 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
2063} 2089}
2064 2090
2065int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type) 2091int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type)
2066{ 2092{
2067 struct mgmt_ev_connected ev; 2093 struct mgmt_addr_info ev;
2068 2094
2069 bacpy(&ev.bdaddr, bdaddr); 2095 bacpy(&ev.bdaddr, bdaddr);
2070 ev.link_type = link_type; 2096 ev.type = link_to_mgmt(link_type);
2071 2097
2072 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); 2098 return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
2073} 2099}
2074 2100
2075static void disconnect_rsp(struct pending_cmd *cmd, void *data) 2101static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2088,17 +2114,18 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
2088 mgmt_pending_remove(cmd); 2114 mgmt_pending_remove(cmd);
2089} 2115}
2090 2116
2091int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) 2117int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2092{ 2118{
2093 struct mgmt_ev_disconnected ev; 2119 struct mgmt_addr_info ev;
2094 struct sock *sk = NULL; 2120 struct sock *sk = NULL;
2095 int err; 2121 int err;
2096 2122
2097 mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); 2123 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
2098 2124
2099 bacpy(&ev.bdaddr, bdaddr); 2125 bacpy(&ev.bdaddr, bdaddr);
2126 ev.type = link_to_mgmt(type);
2100 2127
2101 err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); 2128 err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
2102 2129
2103 if (sk) 2130 if (sk)
2104 sock_put(sk); 2131 sock_put(sk);
@@ -2106,57 +2133,60 @@ int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
2106 return err; 2133 return err;
2107} 2134}
2108 2135
2109int mgmt_disconnect_failed(u16 index) 2136int mgmt_disconnect_failed(struct hci_dev *hdev)
2110{ 2137{
2111 struct pending_cmd *cmd; 2138 struct pending_cmd *cmd;
2112 int err; 2139 int err;
2113 2140
2114 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index); 2141 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
2115 if (!cmd) 2142 if (!cmd)
2116 return -ENOENT; 2143 return -ENOENT;
2117 2144
2118 err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); 2145 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, EIO);
2119 2146
2120 mgmt_pending_remove(cmd); 2147 mgmt_pending_remove(cmd);
2121 2148
2122 return err; 2149 return err;
2123} 2150}
2124 2151
2125int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status) 2152int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
2153 u8 status)
2126{ 2154{
2127 struct mgmt_ev_connect_failed ev; 2155 struct mgmt_ev_connect_failed ev;
2128 2156
2129 bacpy(&ev.bdaddr, bdaddr); 2157 bacpy(&ev.addr.bdaddr, bdaddr);
2158 ev.addr.type = link_to_mgmt(type);
2130 ev.status = status; 2159 ev.status = status;
2131 2160
2132 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); 2161 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
2133} 2162}
2134 2163
2135int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure) 2164int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
2136{ 2165{
2137 struct mgmt_ev_pin_code_request ev; 2166 struct mgmt_ev_pin_code_request ev;
2138 2167
2139 bacpy(&ev.bdaddr, bdaddr); 2168 bacpy(&ev.bdaddr, bdaddr);
2140 ev.secure = secure; 2169 ev.secure = secure;
2141 2170
2142 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), 2171 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
2143 NULL); 2172 NULL);
2144} 2173}
2145 2174
2146int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 2175int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2176 u8 status)
2147{ 2177{
2148 struct pending_cmd *cmd; 2178 struct pending_cmd *cmd;
2149 struct mgmt_rp_pin_code_reply rp; 2179 struct mgmt_rp_pin_code_reply rp;
2150 int err; 2180 int err;
2151 2181
2152 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); 2182 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
2153 if (!cmd) 2183 if (!cmd)
2154 return -ENOENT; 2184 return -ENOENT;
2155 2185
2156 bacpy(&rp.bdaddr, bdaddr); 2186 bacpy(&rp.bdaddr, bdaddr);
2157 rp.status = status; 2187 rp.status = status;
2158 2188
2159 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, 2189 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
2160 sizeof(rp)); 2190 sizeof(rp));
2161 2191
2162 mgmt_pending_remove(cmd); 2192 mgmt_pending_remove(cmd);
@@ -2164,20 +2194,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
2164 return err; 2194 return err;
2165} 2195}
2166 2196
2167int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 2197int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2198 u8 status)
2168{ 2199{
2169 struct pending_cmd *cmd; 2200 struct pending_cmd *cmd;
2170 struct mgmt_rp_pin_code_reply rp; 2201 struct mgmt_rp_pin_code_reply rp;
2171 int err; 2202 int err;
2172 2203
2173 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); 2204 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
2174 if (!cmd) 2205 if (!cmd)
2175 return -ENOENT; 2206 return -ENOENT;
2176 2207
2177 bacpy(&rp.bdaddr, bdaddr); 2208 bacpy(&rp.bdaddr, bdaddr);
2178 rp.status = status; 2209 rp.status = status;
2179 2210
2180 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, 2211 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
2181 sizeof(rp)); 2212 sizeof(rp));
2182 2213
2183 mgmt_pending_remove(cmd); 2214 mgmt_pending_remove(cmd);
@@ -2185,97 +2216,93 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
2185 return err; 2216 return err;
2186} 2217}
2187 2218
2188int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, 2219int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
2189 u8 confirm_hint) 2220 __le32 value, u8 confirm_hint)
2190{ 2221{
2191 struct mgmt_ev_user_confirm_request ev; 2222 struct mgmt_ev_user_confirm_request ev;
2192 2223
2193 BT_DBG("hci%u", index); 2224 BT_DBG("%s", hdev->name);
2194 2225
2195 bacpy(&ev.bdaddr, bdaddr); 2226 bacpy(&ev.bdaddr, bdaddr);
2196 ev.confirm_hint = confirm_hint; 2227 ev.confirm_hint = confirm_hint;
2197 put_unaligned_le32(value, &ev.value); 2228 put_unaligned_le32(value, &ev.value);
2198 2229
2199 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), 2230 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
2200 NULL); 2231 NULL);
2201} 2232}
2202 2233
2203static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, 2234static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2204 u8 opcode) 2235 u8 status, u8 opcode)
2205{ 2236{
2206 struct pending_cmd *cmd; 2237 struct pending_cmd *cmd;
2207 struct mgmt_rp_user_confirm_reply rp; 2238 struct mgmt_rp_user_confirm_reply rp;
2208 int err; 2239 int err;
2209 2240
2210 cmd = mgmt_pending_find(opcode, index); 2241 cmd = mgmt_pending_find(opcode, hdev);
2211 if (!cmd) 2242 if (!cmd)
2212 return -ENOENT; 2243 return -ENOENT;
2213 2244
2214 bacpy(&rp.bdaddr, bdaddr); 2245 bacpy(&rp.bdaddr, bdaddr);
2215 rp.status = status; 2246 rp.status = status;
2216 err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); 2247 err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
2217 2248
2218 mgmt_pending_remove(cmd); 2249 mgmt_pending_remove(cmd);
2219 2250
2220 return err; 2251 return err;
2221} 2252}
2222 2253
2223int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 2254int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2255 u8 status)
2224{ 2256{
2225 return confirm_reply_complete(index, bdaddr, status, 2257 return confirm_reply_complete(hdev, bdaddr, status,
2226 MGMT_OP_USER_CONFIRM_REPLY); 2258 MGMT_OP_USER_CONFIRM_REPLY);
2227} 2259}
2228 2260
2229int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 2261int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
2262 bdaddr_t *bdaddr, u8 status)
2230{ 2263{
2231 return confirm_reply_complete(index, bdaddr, status, 2264 return confirm_reply_complete(hdev, bdaddr, status,
2232 MGMT_OP_USER_CONFIRM_NEG_REPLY); 2265 MGMT_OP_USER_CONFIRM_NEG_REPLY);
2233} 2266}
2234 2267
2235int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status) 2268int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
2236{ 2269{
2237 struct mgmt_ev_auth_failed ev; 2270 struct mgmt_ev_auth_failed ev;
2238 2271
2239 bacpy(&ev.bdaddr, bdaddr); 2272 bacpy(&ev.bdaddr, bdaddr);
2240 ev.status = status; 2273 ev.status = status;
2241 2274
2242 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); 2275 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
2243} 2276}
2244 2277
2245int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status) 2278int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
2246{ 2279{
2247 struct pending_cmd *cmd; 2280 struct pending_cmd *cmd;
2248 struct hci_dev *hdev;
2249 struct mgmt_cp_set_local_name ev; 2281 struct mgmt_cp_set_local_name ev;
2250 int err; 2282 int err;
2251 2283
2252 memset(&ev, 0, sizeof(ev)); 2284 memset(&ev, 0, sizeof(ev));
2253 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 2285 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2254 2286
2255 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index); 2287 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2256 if (!cmd) 2288 if (!cmd)
2257 goto send_event; 2289 goto send_event;
2258 2290
2259 if (status) { 2291 if (status) {
2260 err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO); 2292 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2293 EIO);
2261 goto failed; 2294 goto failed;
2262 } 2295 }
2263 2296
2264 hdev = hci_dev_get(index); 2297 update_eir(hdev);
2265 if (hdev) {
2266 hci_dev_lock_bh(hdev);
2267 update_eir(hdev);
2268 hci_dev_unlock_bh(hdev);
2269 hci_dev_put(hdev);
2270 }
2271 2298
2272 err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev, 2299 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
2273 sizeof(ev)); 2300 sizeof(ev));
2274 if (err < 0) 2301 if (err < 0)
2275 goto failed; 2302 goto failed;
2276 2303
2277send_event: 2304send_event:
2278 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev), 2305 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
2279 cmd ? cmd->sk : NULL); 2306 cmd ? cmd->sk : NULL);
2280 2307
2281failed: 2308failed:
@@ -2284,29 +2311,30 @@ failed:
2284 return err; 2311 return err;
2285} 2312}
2286 2313
2287int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, 2314int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2288 u8 status) 2315 u8 *randomizer, u8 status)
2289{ 2316{
2290 struct pending_cmd *cmd; 2317 struct pending_cmd *cmd;
2291 int err; 2318 int err;
2292 2319
2293 BT_DBG("hci%u status %u", index, status); 2320 BT_DBG("%s status %u", hdev->name, status);
2294 2321
2295 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index); 2322 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
2296 if (!cmd) 2323 if (!cmd)
2297 return -ENOENT; 2324 return -ENOENT;
2298 2325
2299 if (status) { 2326 if (status) {
2300 err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2327 err = cmd_status(cmd->sk, hdev->id,
2301 EIO); 2328 MGMT_OP_READ_LOCAL_OOB_DATA, EIO);
2302 } else { 2329 } else {
2303 struct mgmt_rp_read_local_oob_data rp; 2330 struct mgmt_rp_read_local_oob_data rp;
2304 2331
2305 memcpy(rp.hash, hash, sizeof(rp.hash)); 2332 memcpy(rp.hash, hash, sizeof(rp.hash));
2306 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); 2333 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
2307 2334
2308 err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2335 err = cmd_complete(cmd->sk, hdev->id,
2309 &rp, sizeof(rp)); 2336 MGMT_OP_READ_LOCAL_OOB_DATA,
2337 &rp, sizeof(rp));
2310 } 2338 }
2311 2339
2312 mgmt_pending_remove(cmd); 2340 mgmt_pending_remove(cmd);
@@ -2314,14 +2342,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
2314 return err; 2342 return err;
2315} 2343}
2316 2344
2317int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, 2345int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
2318 u8 *eir) 2346 u8 *dev_class, s8 rssi, u8 *eir)
2319{ 2347{
2320 struct mgmt_ev_device_found ev; 2348 struct mgmt_ev_device_found ev;
2321 2349
2322 memset(&ev, 0, sizeof(ev)); 2350 memset(&ev, 0, sizeof(ev));
2323 2351
2324 bacpy(&ev.bdaddr, bdaddr); 2352 bacpy(&ev.addr.bdaddr, bdaddr);
2353 ev.addr.type = link_to_mgmt(type);
2325 ev.rssi = rssi; 2354 ev.rssi = rssi;
2326 2355
2327 if (eir) 2356 if (eir)
@@ -2330,10 +2359,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2330 if (dev_class) 2359 if (dev_class)
2331 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); 2360 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2332 2361
2333 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); 2362 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
2334} 2363}
2335 2364
2336int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) 2365int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
2337{ 2366{
2338 struct mgmt_ev_remote_name ev; 2367 struct mgmt_ev_remote_name ev;
2339 2368
@@ -2342,37 +2371,64 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
2342 bacpy(&ev.bdaddr, bdaddr); 2371 bacpy(&ev.bdaddr, bdaddr);
2343 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 2372 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2344 2373
2345 return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL); 2374 return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
2346} 2375}
2347 2376
2348int mgmt_discovering(u16 index, u8 discovering) 2377int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status)
2349{ 2378{
2350 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, 2379 struct pending_cmd *cmd;
2380 int err;
2381
2382 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2383 if (!cmd)
2384 return -ENOENT;
2385
2386 err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
2387 mgmt_pending_remove(cmd);
2388
2389 return err;
2390}
2391
2392int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
2393{
2394 struct pending_cmd *cmd;
2395
2396 if (discovering)
2397 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2398 else
2399 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2400
2401 if (cmd != NULL) {
2402 cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
2403 mgmt_pending_remove(cmd);
2404 }
2405
2406 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
2351 sizeof(discovering), NULL); 2407 sizeof(discovering), NULL);
2352} 2408}
2353 2409
2354int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr) 2410int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
2355{ 2411{
2356 struct pending_cmd *cmd; 2412 struct pending_cmd *cmd;
2357 struct mgmt_ev_device_blocked ev; 2413 struct mgmt_ev_device_blocked ev;
2358 2414
2359 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index); 2415 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
2360 2416
2361 bacpy(&ev.bdaddr, bdaddr); 2417 bacpy(&ev.bdaddr, bdaddr);
2362 2418
2363 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev), 2419 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
2364 cmd ? cmd->sk : NULL); 2420 cmd ? cmd->sk : NULL);
2365} 2421}
2366 2422
2367int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr) 2423int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
2368{ 2424{
2369 struct pending_cmd *cmd; 2425 struct pending_cmd *cmd;
2370 struct mgmt_ev_device_unblocked ev; 2426 struct mgmt_ev_device_unblocked ev;
2371 2427
2372 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index); 2428 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
2373 2429
2374 bacpy(&ev.bdaddr, bdaddr); 2430 bacpy(&ev.bdaddr, bdaddr);
2375 2431
2376 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev), 2432 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
2377 cmd ? cmd->sk : NULL); 2433 cmd ? cmd->sk : NULL);
2378} 2434}
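
Note on the mgmt.c hunks above: most of the churn is a mechanical conversion from open-coded list_for_each() plus list_entry() to list_for_each_entry(), and the pending-command bookkeeping moves from the single global cmd_list to the per-device hdev->mgmt_pending list, so lookups no longer need an index filter. The sketch below is a minimal, self-contained user-space re-implementation of the two iteration macros for illustration only; the kernel's real definitions live in <linux/list.h>, and struct session is a made-up container type.

/*
 * Stand-alone illustration of the list_for_each() -> list_for_each_entry()
 * conversion used throughout this series.  Everything here is local to the
 * example; the kernel's macros live in <linux/list.h>.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* old style: iterate raw nodes, convert each one by hand */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* new style: the macro hands back the containing structure directly */
#define list_for_each_entry(pos, head, member)                               \
	for ((pos) = container_of((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head);                                        \
	     (pos) = container_of((pos)->member.next, __typeof__(*(pos)),    \
				  member))

struct session {			/* made-up container type */
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };
	struct session a = { .id = 1 };
	struct session *s;
	struct list_head *p;

	/* link the single element into the list: head <-> a.list */
	head.next = head.prev = &a.list;
	a.list.next = a.list.prev = &head;

	list_for_each(p, &head)		/* old: needs an explicit conversion */
		printf("old: session %d\n",
		       container_of(p, struct session, list)->id);

	list_for_each_entry(s, &head, list)	/* new: no temporary needed */
		printf("new: session %d\n", s->id);

	return 0;
}

read_index_list(), create_eir(), get_service_classes(), get_connections() and find_pairing() above all follow this shape: the struct list_head *p temporary and the explicit list_entry() call disappear, and the loop body works on the containing structure directly.
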
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f9..8743f369ed3f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -65,7 +65,8 @@ static DEFINE_MUTEX(rfcomm_mutex);
65 65
66static LIST_HEAD(session_list); 66static LIST_HEAD(session_list);
67 67
68static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); 68static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
69 u32 priority);
69static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); 70static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
70static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci); 71static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci);
71static int rfcomm_queue_disc(struct rfcomm_dlc *d); 72static int rfcomm_queue_disc(struct rfcomm_dlc *d);
@@ -377,13 +378,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
377static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) 378static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
378{ 379{
379 struct rfcomm_dlc *d; 380 struct rfcomm_dlc *d;
380 struct list_head *p;
381 381
382 list_for_each(p, &s->dlcs) { 382 list_for_each_entry(d, &s->dlcs, list)
383 d = list_entry(p, struct rfcomm_dlc, list);
384 if (d->dlci == dlci) 383 if (d->dlci == dlci)
385 return d; 384 return d;
386 } 385
387 return NULL; 386 return NULL;
388} 387}
389 388
@@ -749,19 +748,34 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
749} 748}
750 749
751/* ---- RFCOMM frame sending ---- */ 750/* ---- RFCOMM frame sending ---- */
752static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) 751static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
752 u32 priority)
753{ 753{
754 struct socket *sock = s->sock; 754 struct socket *sock = s->sock;
755 struct sock *sk = sock->sk;
755 struct kvec iv = { data, len }; 756 struct kvec iv = { data, len };
756 struct msghdr msg; 757 struct msghdr msg;
757 758
758 BT_DBG("session %p len %d", s, len); 759 BT_DBG("session %p len %d priority %u", s, len, priority);
760
761 if (sk->sk_priority != priority) {
762 lock_sock(sk);
763 sk->sk_priority = priority;
764 release_sock(sk);
765 }
759 766
760 memset(&msg, 0, sizeof(msg)); 767 memset(&msg, 0, sizeof(msg));
761 768
762 return kernel_sendmsg(sock, &msg, &iv, 1, len); 769 return kernel_sendmsg(sock, &msg, &iv, 1, len);
763} 770}
764 771
772static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
773{
774 BT_DBG("%p cmd %u", s, cmd->ctrl);
775
776 return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX);
777}
778
765static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) 779static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
766{ 780{
767 struct rfcomm_cmd cmd; 781 struct rfcomm_cmd cmd;
@@ -773,7 +787,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
773 cmd.len = __len8(0); 787 cmd.len = __len8(0);
774 cmd.fcs = __fcs2((u8 *) &cmd); 788 cmd.fcs = __fcs2((u8 *) &cmd);
775 789
776 return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); 790 return rfcomm_send_cmd(s, &cmd);
777} 791}
778 792
779static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) 793static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +801,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
787 cmd.len = __len8(0); 801 cmd.len = __len8(0);
788 cmd.fcs = __fcs2((u8 *) &cmd); 802 cmd.fcs = __fcs2((u8 *) &cmd);
789 803
790 return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); 804 return rfcomm_send_cmd(s, &cmd);
791} 805}
792 806
793static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) 807static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +815,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
801 cmd.len = __len8(0); 815 cmd.len = __len8(0);
802 cmd.fcs = __fcs2((u8 *) &cmd); 816 cmd.fcs = __fcs2((u8 *) &cmd);
803 817
804 return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); 818 return rfcomm_send_cmd(s, &cmd);
805} 819}
806 820
807static int rfcomm_queue_disc(struct rfcomm_dlc *d) 821static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -815,6 +829,8 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
815 if (!skb) 829 if (!skb)
816 return -ENOMEM; 830 return -ENOMEM;
817 831
832 skb->priority = HCI_PRIO_MAX;
833
818 cmd = (void *) __skb_put(skb, sizeof(*cmd)); 834 cmd = (void *) __skb_put(skb, sizeof(*cmd));
819 cmd->addr = d->addr; 835 cmd->addr = d->addr;
820 cmd->ctrl = __ctrl(RFCOMM_DISC, 1); 836 cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
@@ -837,7 +853,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
837 cmd.len = __len8(0); 853 cmd.len = __len8(0);
838 cmd.fcs = __fcs2((u8 *) &cmd); 854 cmd.fcs = __fcs2((u8 *) &cmd);
839 855
840 return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); 856 return rfcomm_send_cmd(s, &cmd);
841} 857}
842 858
843static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) 859static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -862,7 +878,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
862 878
863 *ptr = __fcs(buf); ptr++; 879 *ptr = __fcs(buf); ptr++;
864 880
865 return rfcomm_send_frame(s, buf, ptr - buf); 881 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
866} 882}
867 883
868static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d) 884static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d)
@@ -904,7 +920,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
904 920
905 *ptr = __fcs(buf); ptr++; 921 *ptr = __fcs(buf); ptr++;
906 922
907 return rfcomm_send_frame(s, buf, ptr - buf); 923 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
908} 924}
909 925
910int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, 926int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
@@ -942,7 +958,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
942 958
943 *ptr = __fcs(buf); ptr++; 959 *ptr = __fcs(buf); ptr++;
944 960
945 return rfcomm_send_frame(s, buf, ptr - buf); 961 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
946} 962}
947 963
948static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) 964static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
@@ -969,7 +985,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
969 985
970 *ptr = __fcs(buf); ptr++; 986 *ptr = __fcs(buf); ptr++;
971 987
972 return rfcomm_send_frame(s, buf, ptr - buf); 988 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
973} 989}
974 990
975static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig) 991static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig)
@@ -996,7 +1012,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig
996 1012
997 *ptr = __fcs(buf); ptr++; 1013 *ptr = __fcs(buf); ptr++;
998 1014
999 return rfcomm_send_frame(s, buf, ptr - buf); 1015 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
1000} 1016}
1001 1017
1002static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) 1018static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
@@ -1018,7 +1034,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
1018 1034
1019 *ptr = __fcs(buf); ptr++; 1035 *ptr = __fcs(buf); ptr++;
1020 1036
1021 return rfcomm_send_frame(s, buf, ptr - buf); 1037 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
1022} 1038}
1023 1039
1024static int rfcomm_send_fcon(struct rfcomm_session *s, int cr) 1040static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
@@ -1040,7 +1056,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
1040 1056
1041 *ptr = __fcs(buf); ptr++; 1057 *ptr = __fcs(buf); ptr++;
1042 1058
1043 return rfcomm_send_frame(s, buf, ptr - buf); 1059 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
1044} 1060}
1045 1061
1046static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len) 1062static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
@@ -1091,7 +1107,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
1091 1107
1092 *ptr = __fcs(buf); ptr++; 1108 *ptr = __fcs(buf); ptr++;
1093 1109
1094 return rfcomm_send_frame(s, buf, ptr - buf); 1110 return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
1095} 1111}
1096 1112
1097static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) 1113static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
@@ -1769,7 +1785,8 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
1769 return skb_queue_len(&d->tx_queue); 1785 return skb_queue_len(&d->tx_queue);
1770 1786
1771 while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) { 1787 while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) {
1772 err = rfcomm_send_frame(d->session, skb->data, skb->len); 1788 err = rfcomm_send_frame(d->session, skb->data, skb->len,
1789 skb->priority);
1773 if (err < 0) { 1790 if (err < 0) {
1774 skb_queue_head(&d->tx_queue, skb); 1791 skb_queue_head(&d->tx_queue, skb);
1775 break; 1792 break;
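
The hunks above funnel every fixed-size RFCOMM control frame through rfcomm_send_cmd() and pass HCI_PRIO_MAX to rfcomm_send_frame(); the helper itself is introduced in an earlier part of this patch that is not shown in this excerpt. A minimal sketch, assuming it simply forwards the packed command at maximum priority, consistent with the call sites above:

static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
{
	/* Sketch of the helper called above; the real definition lives in an
	 * earlier hunk of this patch. Assumption: control commands are always
	 * sent at HCI_PRIO_MAX, like the other control frames in this file.
	 */
	return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX);
}
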
@@ -2120,15 +2137,13 @@ static struct hci_cb rfcomm_cb = {
2120static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) 2137static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2121{ 2138{
2122 struct rfcomm_session *s; 2139 struct rfcomm_session *s;
2123 struct list_head *pp, *p;
2124 2140
2125 rfcomm_lock(); 2141 rfcomm_lock();
2126 2142
2127 list_for_each(p, &session_list) { 2143 list_for_each_entry(s, &session_list, list) {
2128 s = list_entry(p, struct rfcomm_session, list); 2144 struct rfcomm_dlc *d;
2129 list_for_each(pp, &s->dlcs) { 2145 list_for_each_entry(d, &s->dlcs, list) {
2130 struct sock *sk = s->sock->sk; 2146 struct sock *sk = s->sock->sk;
2131 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2132 2147
2133 seq_printf(f, "%s %s %ld %d %d %d %d\n", 2148 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2134 batostr(&bt_sk(sk)->src), 2149 batostr(&bt_sk(sk)->src),
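
The debugfs hunk is one instance of the series-wide cleanup that replaces open-coded list_for_each() plus list_entry() with list_for_each_entry(). The generic shape of that conversion, using illustrative types that are not taken from the patch:

#include <linux/list.h>

struct item {
	struct list_head list;
	int id;
};

/* Before: list_for_each(p, head) followed by list_entry(p, struct item, list).
 * After: the entry iterator hides the container_of() arithmetic and yields
 * the containing structure directly.
 */
static struct item *find_item(struct list_head *head, int id)
{
	struct item *i;

	list_for_each_entry(i, head, list)
		if (i->id == id)
			return i;

	return NULL;
}
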
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5417f6127323..aea2bdd1510f 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
600 break; 600 break;
601 } 601 }
602 602
603 skb->priority = sk->sk_priority;
604
603 err = rfcomm_dlc_send(d, skb); 605 err = rfcomm_dlc_send(d, skb);
604 if (err < 0) { 606 if (err < 0) {
605 kfree_skb(skb); 607 kfree_skb(skb);
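
With rfcomm_sock_sendmsg() now copying sk->sk_priority into every outgoing skb, an application can influence per-frame priority from userspace via SO_PRIORITY. An illustrative userspace snippet, not part of the patch (values above 6 would additionally require CAP_NET_ADMIN):

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

/* Open an RFCOMM socket whose frames carry a raised priority once this
 * patch is applied; on older kernels the option is accepted but the
 * RFCOMM layer simply ignores it.
 */
static int open_prio_rfcomm_socket(void)
{
	int prio = 6;	/* hypothetical example value */
	int sk = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	if (sk < 0)
		return -1;

	/* Not fatal on failure: frames simply keep the default priority 0. */
	setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));

	return sk;
}
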
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796313e0..fa8f4de53b99 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
34#include <linux/capability.h> 34#include <linux/capability.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/workqueue.h>
37 38
38#include <net/bluetooth/bluetooth.h> 39#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h> 40#include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@ struct rfcomm_dev {
65 struct rfcomm_dlc *dlc; 66 struct rfcomm_dlc *dlc;
66 struct tty_struct *tty; 67 struct tty_struct *tty;
67 wait_queue_head_t wait; 68 wait_queue_head_t wait;
68 struct tasklet_struct wakeup_task; 69 struct work_struct wakeup_task;
69 70
70 struct device *tty_dev; 71 struct device *tty_dev;
71 72
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
81static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); 82static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
82static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); 83static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
83 84
84static void rfcomm_tty_wakeup(unsigned long arg); 85static void rfcomm_tty_wakeup(struct work_struct *work);
85 86
86/* ---- Device functions ---- */ 87/* ---- Device functions ---- */
87static void rfcomm_dev_destruct(struct rfcomm_dev *dev) 88static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
133static struct rfcomm_dev *__rfcomm_dev_get(int id) 134static struct rfcomm_dev *__rfcomm_dev_get(int id)
134{ 135{
135 struct rfcomm_dev *dev; 136 struct rfcomm_dev *dev;
136 struct list_head *p;
137 137
138 list_for_each(p, &rfcomm_dev_list) { 138 list_for_each_entry(dev, &rfcomm_dev_list, list)
139 dev = list_entry(p, struct rfcomm_dev, list);
140 if (dev->id == id) 139 if (dev->id == id)
141 return dev; 140 return dev;
142 }
143 141
144 return NULL; 142 return NULL;
145} 143}
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
197 195
198static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 196static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
199{ 197{
200 struct rfcomm_dev *dev; 198 struct rfcomm_dev *dev, *entry;
201 struct list_head *head = &rfcomm_dev_list, *p; 199 struct list_head *head = &rfcomm_dev_list, *p;
202 int err = 0; 200 int err = 0;
203 201
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
212 if (req->dev_id < 0) { 210 if (req->dev_id < 0) {
213 dev->id = 0; 211 dev->id = 0;
214 212
215 list_for_each(p, &rfcomm_dev_list) { 213 list_for_each_entry(entry, &rfcomm_dev_list, list) {
216 if (list_entry(p, struct rfcomm_dev, list)->id != dev->id) 214 if (entry->id != dev->id)
217 break; 215 break;
218 216
219 dev->id++; 217 dev->id++;
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
222 } else { 220 } else {
223 dev->id = req->dev_id; 221 dev->id = req->dev_id;
224 222
225 list_for_each(p, &rfcomm_dev_list) { 223 list_for_each_entry(entry, &rfcomm_dev_list, list) {
226 struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
227
228 if (entry->id == dev->id) { 224 if (entry->id == dev->id) {
229 err = -EADDRINUSE; 225 err = -EADDRINUSE;
230 goto out; 226 goto out;
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
257 atomic_set(&dev->opened, 0); 253 atomic_set(&dev->opened, 0);
258 254
259 init_waitqueue_head(&dev->wait); 255 init_waitqueue_head(&dev->wait);
260 tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); 256 INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
261 257
262 skb_queue_head_init(&dev->pending); 258 skb_queue_head_init(&dev->pending);
263 259
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
351 struct rfcomm_dev *dev = (void *) skb->sk; 347 struct rfcomm_dev *dev = (void *) skb->sk;
352 atomic_sub(skb->truesize, &dev->wmem_alloc); 348 atomic_sub(skb->truesize, &dev->wmem_alloc);
353 if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) 349 if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
354 tasklet_schedule(&dev->wakeup_task); 350 queue_work(system_nrt_wq, &dev->wakeup_task);
355 rfcomm_dev_put(dev); 351 rfcomm_dev_put(dev);
356} 352}
357 353
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg)
455 451
456static int rfcomm_get_dev_list(void __user *arg) 452static int rfcomm_get_dev_list(void __user *arg)
457{ 453{
454 struct rfcomm_dev *dev;
458 struct rfcomm_dev_list_req *dl; 455 struct rfcomm_dev_list_req *dl;
459 struct rfcomm_dev_info *di; 456 struct rfcomm_dev_info *di;
460 struct list_head *p;
461 int n = 0, size, err; 457 int n = 0, size, err;
462 u16 dev_num; 458 u16 dev_num;
463 459
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg)
479 475
480 read_lock_bh(&rfcomm_dev_lock); 476 read_lock_bh(&rfcomm_dev_lock);
481 477
482 list_for_each(p, &rfcomm_dev_list) { 478 list_for_each_entry(dev, &rfcomm_dev_list, list) {
483 struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
484 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) 479 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
485 continue; 480 continue;
486 (di + n)->id = dev->id; 481 (di + n)->id = dev->id;
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
635} 630}
636 631
637/* ---- TTY functions ---- */ 632/* ---- TTY functions ---- */
638static void rfcomm_tty_wakeup(unsigned long arg) 633static void rfcomm_tty_wakeup(struct work_struct *work)
639{ 634{
640 struct rfcomm_dev *dev = (void *) arg; 635 struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
636 wakeup_task);
641 struct tty_struct *tty = dev->tty; 637 struct tty_struct *tty = dev->tty;
642 if (!tty) 638 if (!tty)
643 return; 639 return;
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
762 rfcomm_dlc_close(dev->dlc, 0); 758 rfcomm_dlc_close(dev->dlc, 0);
763 759
764 clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); 760 clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
765 tasklet_kill(&dev->wakeup_task); 761 cancel_work_sync(&dev->wakeup_task);
766 762
767 rfcomm_dlc_lock(dev->dlc); 763 rfcomm_dlc_lock(dev->dlc);
768 tty->driver_data = NULL; 764 tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = {
1155 1151
1156int __init rfcomm_init_ttys(void) 1152int __init rfcomm_init_ttys(void)
1157{ 1153{
1154 int error;
1155
1158 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); 1156 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
1159 if (!rfcomm_tty_driver) 1157 if (!rfcomm_tty_driver)
1160 return -1; 1158 return -ENOMEM;
1161 1159
1162 rfcomm_tty_driver->owner = THIS_MODULE; 1160 rfcomm_tty_driver->owner = THIS_MODULE;
1163 rfcomm_tty_driver->driver_name = "rfcomm"; 1161 rfcomm_tty_driver->driver_name = "rfcomm";
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void)
1172 rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; 1170 rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
1173 tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); 1171 tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
1174 1172
1175 if (tty_register_driver(rfcomm_tty_driver)) { 1173 error = tty_register_driver(rfcomm_tty_driver);
1174 if (error) {
1176 BT_ERR("Can't register RFCOMM TTY driver"); 1175 BT_ERR("Can't register RFCOMM TTY driver");
1177 put_tty_driver(rfcomm_tty_driver); 1176 put_tty_driver(rfcomm_tty_driver);
1178 return -1; 1177 return error;
1179 } 1178 }
1180 1179
1181 BT_INFO("RFCOMM TTY layer initialized"); 1180 BT_INFO("RFCOMM TTY layer initialized");
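
The tty.c changes convert the wakeup tasklet into a work item so the wakeup path runs in process context. The conversion follows a standard pattern; a driver-agnostic sketch with placeholder names (mydev and its functions are illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct mydev {
	struct work_struct wakeup_task;
};

static void mydev_wakeup(struct work_struct *work)
{
	/* Work items get no opaque argument, unlike tasklets, so
	 * container_of() replaces the (unsigned long) cast.
	 */
	struct mydev *dev = container_of(work, struct mydev, wakeup_task);

	(void) dev;	/* process context: sleeping is now allowed here */
}

static void mydev_setup(struct mydev *dev)
{
	INIT_WORK(&dev->wakeup_task, mydev_wakeup);	/* was tasklet_init() */
}

static void mydev_kick(struct mydev *dev)
{
	/* system_nrt_wq keeps the work non-reentrant, preserving the tasklet
	 * guarantee that the handler never runs concurrently with itself;
	 * was tasklet_schedule().
	 */
	queue_work(system_nrt_wq, &dev->wakeup_task);
}

static void mydev_teardown(struct mydev *dev)
{
	cancel_work_sync(&dev->wakeup_task);	/* was tasklet_kill() */
}
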
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 759b63572641..94e94ca35384 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -181,7 +181,8 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
181 if (!skb) 181 if (!skb)
182 return; 182 return;
183 183
184 hci_send_acl(conn->hcon, skb, 0); 184 skb->priority = HCI_PRIO_MAX;
185 hci_send_acl(conn->hchan, skb, 0);
185 186
186 mod_timer(&conn->security_timer, jiffies + 187 mod_timer(&conn->security_timer, jiffies +
187 msecs_to_jiffies(SMP_TIMEOUT)); 188 msecs_to_jiffies(SMP_TIMEOUT));
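
The smp.c hunk tags pairing PDUs with HCI_PRIO_MAX and routes them through the connection's HCI channel, so security traffic is serviced ahead of bulk data. As a conceptual illustration only (this is not the hci_core scheduler, and it assumes HCI_PRIO_MAX from the Bluetooth core headers), a sender that honours skb->priority might drain per-priority queues highest-first:

#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* queues[] is assumed to hold one sk_buff_head per priority level,
 * indexed 0..HCI_PRIO_MAX.
 */
static struct sk_buff *pick_next_frame(struct sk_buff_head *queues)
{
	int prio;

	for (prio = HCI_PRIO_MAX; prio >= 0; prio--) {
		struct sk_buff *skb = skb_dequeue(&queues[prio]);

		if (skb)
			return skb;
	}

	return NULL;
}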