author		John W. Linville <linville@tuxdriver.com>	2011-12-19 14:28:22 -0500
committer	John W. Linville <linville@tuxdriver.com>	2011-12-19 14:28:22 -0500
commit		9662cbc712babe3f7a792af2bdd47fa0c631f27f (patch)
tree		9a19f5656fb83ab58a4a3773b12ed1036bf8753b /net
parent		640f5950a7d9fd0d279d843b261eb934793605fb (diff)
parent		4b0b2f088f12e2ada1297502d7bebde182cf65b0 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-next
Diffstat (limited to 'net')
-rw-r--r--	net/bluetooth/hci_conn.c	137
-rw-r--r--	net/bluetooth/hci_core.c	173
-rw-r--r--	net/bluetooth/hci_event.c	99
-rw-r--r--	net/bluetooth/hci_sock.c	21
-rw-r--r--	net/bluetooth/hci_sysfs.c	91
-rw-r--r--	net/bluetooth/hidp/core.c	4
-rw-r--r--	net/bluetooth/l2cap_core.c	360
-rw-r--r--	net/bluetooth/l2cap_sock.c	62
-rw-r--r--	net/bluetooth/mgmt.c	696
-rw-r--r--	net/bluetooth/rfcomm/core.c	1
-rw-r--r--	net/bluetooth/sco.c	4
11 files changed, 828 insertions(+), 820 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b328ac611ccd..401d8ea266aa 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -275,9 +275,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
 	}
 }
 
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_timeout(struct work_struct *work)
 {
-	struct hci_conn *conn = (void *) arg;
+	struct hci_conn *conn = container_of(work, struct hci_conn,
+						disc_work.work);
 	struct hci_dev *hdev = conn->hdev;
 	__u8 reason;
 
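This hunk sets the pattern for the whole merge: a timer callback that smuggled its object through an unsigned long becomes a work handler that recovers the enclosing structure with container_of(). A minimal sketch of the idiom, with illustrative names (my_conn is not a type from this patch):

	#include <linux/workqueue.h>

	struct my_conn {
		struct delayed_work disc_work;	/* was: struct timer_list disc_timer */
	};

	static void my_conn_timeout(struct work_struct *work)
	{
		/* 'work' points at disc_work.work; step back to the container */
		struct my_conn *conn = container_of(work, struct my_conn,
						    disc_work.work);
		/* runs in process context now, so sleeping is allowed here */
	}

The payoff is that the handler may sleep, which is what lets the rest of the series replace BH-safe spinlocks with mutexes.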
@@ -311,6 +312,42 @@ static void hci_conn_timeout(unsigned long arg)
 	hci_dev_unlock(hdev);
 }
 
+/* Enter sniff mode */
+static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p mode %d", conn, conn->mode);
+
+	if (test_bit(HCI_RAW, &hdev->flags))
+		return;
+
+	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+		return;
+
+	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+		return;
+
+	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+		struct hci_cp_sniff_subrate cp;
+		cp.handle = cpu_to_le16(conn->handle);
+		cp.max_latency = cpu_to_le16(0);
+		cp.min_remote_timeout = cpu_to_le16(0);
+		cp.min_local_timeout = cpu_to_le16(0);
+		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+	}
+
+	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+		struct hci_cp_sniff_mode cp;
+		cp.handle = cpu_to_le16(conn->handle);
+		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+		cp.attempt = cpu_to_le16(4);
+		cp.timeout = cpu_to_le16(1);
+		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+	}
+}
+
 static void hci_conn_idle(unsigned long arg)
 {
 	struct hci_conn *conn = (void *) arg;
@@ -325,12 +362,8 @@ static void hci_conn_auto_accept(unsigned long arg)
 	struct hci_conn *conn = (void *) arg;
 	struct hci_dev *hdev = conn->hdev;
 
-	hci_dev_lock(hdev);
-
 	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
 							&conn->dst);
-
-	hci_dev_unlock(hdev);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -374,9 +407,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	skb_queue_head_init(&conn->data_q);
 
-	hci_chan_hash_init(conn);
+	INIT_LIST_HEAD(&conn->chan_list);
 
-	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
+	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
 	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
 						(unsigned long) conn);
@@ -385,8 +418,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	hci_dev_hold(hdev);
 
-	tasklet_disable(&hdev->tx_task);
-
 	hci_conn_hash_add(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
@@ -395,8 +426,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	hci_conn_init_sysfs(conn);
 
-	tasklet_enable(&hdev->tx_task);
-
 	return conn;
 }
 
@@ -408,7 +437,7 @@ int hci_conn_del(struct hci_conn *conn)
 
 	del_timer(&conn->idle_timer);
 
-	del_timer(&conn->disc_timer);
+	cancel_delayed_work_sync(&conn->disc_work);
 
 	del_timer(&conn->auto_accept_timer);
 
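The teardown side matters as much as the handler: del_timer() only removes a pending timer, while cancel_delayed_work_sync() also waits for a handler that has already started to run. A sketch of the arm/re-arm/cancel lifecycle (names illustrative; note that mod_delayed_work() did not exist yet at this point in kernel history, hence the cancel-then-schedule pairing):

	/* arm: run hci_conn_timeout() after 'timeout' jiffies */
	schedule_delayed_work(&conn->disc_work, timeout);

	/* re-arm: cancel first, then schedule again */
	cancel_delayed_work_sync(&conn->disc_work);
	schedule_delayed_work(&conn->disc_work, timeout);

	/* teardown: also waits for a running handler, unlike del_timer() */
	cancel_delayed_work_sync(&conn->disc_work);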
@@ -432,16 +461,13 @@ int hci_conn_del(struct hci_conn *conn)
 		}
 	}
 
-	tasklet_disable(&hdev->tx_task);
 
-	hci_chan_hash_flush(conn);
+	hci_chan_list_flush(conn);
 
 	hci_conn_hash_del(hdev, conn);
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
 
-	tasklet_enable(&hdev->tx_task);
-
 	skb_queue_purge(&conn->data_q);
 
 	hci_conn_put_device(conn);
@@ -674,7 +700,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		goto encrypt;
 
 auth:
-	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
 		return 0;
 
 	if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -767,57 +793,15 @@ timer:
 			jiffies + msecs_to_jiffies(hdev->idle_timeout));
 }
 
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("conn %p mode %d", conn, conn->mode);
-
-	if (test_bit(HCI_RAW, &hdev->flags))
-		return;
-
-	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
-		return;
-
-	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
-		return;
-
-	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
-		struct hci_cp_sniff_subrate cp;
-		cp.handle = cpu_to_le16(conn->handle);
-		cp.max_latency = cpu_to_le16(0);
-		cp.min_remote_timeout = cpu_to_le16(0);
-		cp.min_local_timeout = cpu_to_le16(0);
-		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
-	}
-
-	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
-		struct hci_cp_sniff_mode cp;
-		cp.handle = cpu_to_le16(conn->handle);
-		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
-		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
-		cp.attempt = cpu_to_le16(4);
-		cp.timeout = cpu_to_le16(1);
-		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
-	}
-}
-
 /* Drop all connection on the device */
 void hci_conn_hash_flush(struct hci_dev *hdev)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
+	struct hci_conn *c;
 
 	BT_DBG("hdev %s", hdev->name);
 
-	p = h->list.next;
-	while (p != &h->list) {
-		struct hci_conn *c;
-
-		c = list_entry(p, struct hci_conn, list);
-		p = p->next;
-
+	list_for_each_entry_rcu(c, &h->list, list) {
 		c->state = BT_CLOSED;
 
 		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
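hci_conn_hash_flush() is also the first conversion from open-coded list walking under tasklet_disable() to an RCU-protected traversal. The reader side of that scheme, sketched with illustrative names (the real code relies on the Bluetooth refcounting to keep the object alive after the lookup returns):

	#include <linux/rculist.h>

	static struct my_conn *find_conn(struct list_head *head, __u16 handle)
	{
		struct my_conn *c, *found = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(c, head, list) {
			if (c->handle == handle) {
				found = c;	/* caller must guarantee lifetime */
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}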
@@ -882,7 +866,7 @@ int hci_get_conn_list(void __user *arg)
 
 	ci = cl->conn_info;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
 		bacpy(&(ci + n)->bdaddr, &c->dst);
 		(ci + n)->handle = c->handle;
@@ -893,7 +877,7 @@ int hci_get_conn_list(void __user *arg)
 		if (++n >= req.conn_num)
 			break;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	cl->dev_id = hdev->id;
 	cl->conn_num = n;
@@ -917,7 +901,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
 	if (copy_from_user(&req, arg, sizeof(req)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
 	if (conn) {
 		bacpy(&ci.bdaddr, &conn->dst);
@@ -927,7 +911,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (!conn)
 		return -ENOENT;
@@ -943,11 +927,11 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
 	if (copy_from_user(&req, arg, sizeof(req)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
 	if (conn)
 		req.type = conn->auth_type;
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (!conn)
 		return -ENOENT;
@@ -969,9 +953,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
 	chan->conn = conn;
 	skb_queue_head_init(&chan->data_q);
 
-	tasklet_disable(&hdev->tx_task);
-	hci_chan_hash_add(conn, chan);
-	tasklet_enable(&hdev->tx_task);
+	list_add_rcu(&chan->list, &conn->chan_list);
 
 	return chan;
 }
@@ -983,9 +965,9 @@ int hci_chan_del(struct hci_chan *chan)
 
 	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
 
-	tasklet_disable(&hdev->tx_task);
-	hci_chan_hash_del(conn, chan);
-	tasklet_enable(&hdev->tx_task);
+	list_del_rcu(&chan->list);
+
+	synchronize_rcu();
 
 	skb_queue_purge(&chan->data_q);
 	kfree(chan);
@@ -993,13 +975,12 @@ int hci_chan_del(struct hci_chan *chan)
 	return 0;
 }
 
-void hci_chan_hash_flush(struct hci_conn *conn)
+void hci_chan_list_flush(struct hci_conn *conn)
 {
-	struct hci_chan_hash *h = &conn->chan_hash;
-	struct hci_chan *chan, *tmp;
+	struct hci_chan *chan;
 
 	BT_DBG("conn %p", conn);
 
-	list_for_each_entry_safe(chan, tmp, &h->list, list)
+	list_for_each_entry_rcu(chan, &conn->chan_list, list)
 		hci_chan_del(chan);
 }
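hci_chan_del() above is the matching writer side: unlink with list_del_rcu(), let every in-flight reader finish with synchronize_rcu(), and only then free the memory. Condensed, the ordering is:

	list_del_rcu(&chan->list);	/* readers may still see the entry */

	synchronize_rcu();		/* wait for all rcu_read_lock() sections */

	kfree(chan);			/* now no reader can hold a pointer to it */

synchronize_rcu() can sleep, which is only legal because this path now runs in process context rather than in a tasklet.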
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ce3727ecc0c4..d6382dbb7b76 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
 /*
    BlueZ - Bluetooth protocol stack for Linux
    Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -56,11 +57,11 @@
 
 int enable_hs;
 
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
 
-static DEFINE_RWLOCK(hci_task_lock);
+static DEFINE_MUTEX(hci_task_lock);
 
 /* HCI device list */
 LIST_HEAD(hci_dev_list);
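Swapping DEFINE_RWLOCK for DEFINE_MUTEX follows directly from the tasklet-to-workqueue move: tasklets run in softirq context and may not sleep, so they needed a BH-safe lock; work items run in process context, where a sleeping mutex is both legal and simpler to reason about. Roughly (a sketch of the two worlds, not the patch's literal code):

	/* softirq world: must not sleep, so spin with BH disabled */
	read_lock_bh(&hci_task_lock);
	/* ... critical section ... */
	read_unlock_bh(&hci_task_lock);

	/* workqueue world: may sleep, so a mutex suffices */
	mutex_lock(&hci_task_lock);
	/* ... critical section ... */
	mutex_unlock(&hci_task_lock);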
@@ -209,7 +210,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 		skb->dev = (void *) hdev;
 
 		skb_queue_tail(&hdev->cmd_q, skb);
-		tasklet_schedule(&hdev->cmd_task);
+		queue_work(hdev->workqueue, &hdev->cmd_work);
 	}
 	skb_queue_purge(&hdev->driver_init);
 
@@ -433,14 +434,14 @@ int hci_inquiry(void __user *arg)
 	if (!hdev)
 		return -ENODEV;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
 				inquiry_cache_empty(hdev) ||
 				ir.flags & IREQ_CACHE_FLUSH) {
 		inquiry_cache_flush(hdev);
 		do_inquiry = 1;
 	}
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	timeo = ir.length * msecs_to_jiffies(2000);
 
@@ -462,9 +463,9 @@
 		goto done;
 	}
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	BT_DBG("num_rsp %d", ir.num_rsp);
 
@@ -541,15 +542,15 @@ int hci_dev_open(__u16 dev)
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
 		if (!test_bit(HCI_SETUP, &hdev->flags)) {
-			hci_dev_lock_bh(hdev);
+			hci_dev_lock(hdev);
 			mgmt_powered(hdev, 1);
-			hci_dev_unlock_bh(hdev);
+			hci_dev_unlock(hdev);
 		}
 	} else {
 		/* Init failed, cleanup */
-		tasklet_kill(&hdev->rx_task);
-		tasklet_kill(&hdev->tx_task);
-		tasklet_kill(&hdev->cmd_task);
+		flush_work(&hdev->tx_work);
+		flush_work(&hdev->cmd_work);
+		flush_work(&hdev->rx_work);
 
 		skb_queue_purge(&hdev->cmd_q);
 		skb_queue_purge(&hdev->rx_q);
@@ -585,9 +586,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 		return 0;
 	}
 
-	/* Kill RX and TX tasks */
-	tasklet_kill(&hdev->rx_task);
-	tasklet_kill(&hdev->tx_task);
+	/* Flush RX and TX works */
+	flush_work(&hdev->tx_work);
+	flush_work(&hdev->rx_work);
 
 	if (hdev->discov_timeout > 0) {
 		cancel_delayed_work(&hdev->discov_off);
@@ -597,10 +598,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
 		cancel_delayed_work(&hdev->power_off);
 
-	hci_dev_lock_bh(hdev);
+	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		cancel_delayed_work(&hdev->service_cache);
+
+	hci_dev_lock(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	hci_notify(hdev, HCI_DEV_DOWN);
 
@@ -617,8 +621,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
-	/* Kill cmd task */
-	tasklet_kill(&hdev->cmd_task);
+	/* flush cmd work */
+	flush_work(&hdev->cmd_work);
 
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
@@ -636,9 +640,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	mgmt_powered(hdev, 0);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	/* Clear flags */
 	hdev->flags = 0;
@@ -672,7 +676,6 @@ int hci_dev_reset(__u16 dev)
 		return -ENODEV;
 
 	hci_req_lock(hdev);
-	tasklet_disable(&hdev->tx_task);
 
 	if (!test_bit(HCI_UP, &hdev->flags))
 		goto done;
@@ -681,10 +684,10 @@ int hci_dev_reset(__u16 dev)
 	skb_queue_purge(&hdev->rx_q);
 	skb_queue_purge(&hdev->cmd_q);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	if (hdev->flush)
 		hdev->flush(hdev);
@@ -697,7 +700,6 @@ int hci_dev_reset(__u16 dev)
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 done:
-	tasklet_enable(&hdev->tx_task);
 	hci_req_unlock(hdev);
 	hci_dev_put(hdev);
 	return ret;
@@ -939,7 +941,7 @@ static void hci_power_on(struct work_struct *work)
 		return;
 
 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-		queue_delayed_work(hdev->workqueue, &hdev->power_off,
+		schedule_delayed_work(&hdev->power_off,
 					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
@@ -967,13 +969,13 @@ static void hci_discov_off(struct work_struct *work)
 
 	BT_DBG("%s", hdev->name);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
 	hdev->discov_timeout = 0;
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1207,7 +1209,7 @@ static void hci_cmd_timer(unsigned long arg)
 
 	BT_ERR("%s command tx timeout", hdev->name);
 	atomic_set(&hdev->cmd_cnt, 1);
-	tasklet_schedule(&hdev->cmd_task);
+	queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
@@ -1340,9 +1342,10 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	return mgmt_device_unblocked(hdev, bdaddr);
 }
 
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
 {
-	struct hci_dev *hdev = (void *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							adv_work.work);
 
 	hci_dev_lock(hdev);
 
@@ -1443,7 +1446,7 @@ int hci_register_dev(struct hci_dev *hdev)
 	list_add_tail(&hdev->list, head);
 
 	atomic_set(&hdev->refcnt, 1);
-	spin_lock_init(&hdev->lock);
+	mutex_init(&hdev->lock);
 
 	hdev->flags = 0;
 	hdev->dev_flags = 0;
@@ -1456,9 +1459,10 @@ int hci_register_dev(struct hci_dev *hdev)
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
 
-	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
-	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
-	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+	INIT_WORK(&hdev->rx_work, hci_rx_work);
+	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+	INIT_WORK(&hdev->tx_work, hci_tx_work);
+
 
 	skb_queue_head_init(&hdev->rx_q);
 	skb_queue_head_init(&hdev->cmd_q);
@@ -1487,9 +1491,8 @@ int hci_register_dev(struct hci_dev *hdev)
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
 	INIT_LIST_HEAD(&hdev->adv_entries);
-	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
-						(unsigned long) hdev);
 
+	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
 	INIT_WORK(&hdev->power_on, hci_power_on);
 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
 
@@ -1501,7 +1504,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	write_unlock_bh(&hci_dev_list_lock);
 
-	hdev->workqueue = create_singlethread_workqueue(hdev->name);
+	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+							WQ_MEM_RECLAIM, 1);
 	if (!hdev->workqueue) {
 		error = -ENOMEM;
 		goto err;
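The workqueue itself is upgraded from create_singlethread_workqueue() to an explicit alloc_workqueue(). Reading the flags: WQ_HIGHPRI places the items on a high-priority worker pool, WQ_UNBOUND opts out of per-CPU queuing, WQ_MEM_RECLAIM guarantees a rescuer thread so the queue keeps making progress under memory pressure, and max_active = 1 preserves the old single-threaded ordering between rx, tx and cmd work. A sketch of equivalent usage (error handling trimmed, names illustrative):

	struct workqueue_struct *wq;

	wq = alloc_workqueue("hci0", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &hdev->rx_work);	/* ordered with cmd_work and tx_work */

	destroy_workqueue(wq);		/* flushes everything still queued */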
@@ -1522,7 +1526,7 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	set_bit(HCI_AUTO_OFF, &hdev->flags);
 	set_bit(HCI_SETUP, &hdev->flags);
-	queue_work(hdev->workqueue, &hdev->power_on);
+	schedule_work(&hdev->power_on);
 
 	hci_notify(hdev, HCI_DEV_REG);
 
@@ -1557,9 +1561,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
 
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
 				!test_bit(HCI_SETUP, &hdev->flags)) {
-		hci_dev_lock_bh(hdev);
+		hci_dev_lock(hdev);
 		mgmt_index_removed(hdev);
-		hci_dev_unlock_bh(hdev);
+		hci_dev_unlock(hdev);
 	}
 
 	/* mgmt_index_removed should take care of emptying the
@@ -1575,17 +1579,17 @@ void hci_unregister_dev(struct hci_dev *hdev)
 
 	hci_del_sysfs(hdev);
 
-	del_timer(&hdev->adv_timer);
+	cancel_delayed_work_sync(&hdev->adv_work);
 
 	destroy_workqueue(hdev->workqueue);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	hci_blacklist_clear(hdev);
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
 	hci_adv_entries_clear(hdev);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	__hci_dev_put(hdev);
 }
@@ -1623,9 +1627,8 @@ int hci_recv_frame(struct sk_buff *skb)
 	/* Time stamp */
 	__net_timestamp(skb);
 
-	/* Queue frame for rx task */
 	skb_queue_tail(&hdev->rx_q, skb);
-	tasklet_schedule(&hdev->rx_task);
+	queue_work(hdev->workqueue, &hdev->rx_work);
 
 	return 0;
 }
@@ -1808,14 +1811,14 @@ int hci_register_proto(struct hci_proto *hp)
 	if (hp->id >= HCI_MAX_PROTO)
 		return -EINVAL;
 
-	write_lock_bh(&hci_task_lock);
+	mutex_lock(&hci_task_lock);
 
 	if (!hci_proto[hp->id])
 		hci_proto[hp->id] = hp;
 	else
 		err = -EEXIST;
 
-	write_unlock_bh(&hci_task_lock);
+	mutex_unlock(&hci_task_lock);
 
 	return err;
 }
@@ -1830,14 +1833,14 @@ int hci_unregister_proto(struct hci_proto *hp)
 	if (hp->id >= HCI_MAX_PROTO)
 		return -EINVAL;
 
-	write_lock_bh(&hci_task_lock);
+	mutex_lock(&hci_task_lock);
 
 	if (hci_proto[hp->id])
 		hci_proto[hp->id] = NULL;
 	else
 		err = -ENOENT;
 
-	write_unlock_bh(&hci_task_lock);
+	mutex_unlock(&hci_task_lock);
 
 	return err;
 }
@@ -1922,7 +1925,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
 		hdev->init_last_cmd = opcode;
 
 	skb_queue_tail(&hdev->cmd_q, skb);
-	tasklet_schedule(&hdev->cmd_task);
+	queue_work(hdev->workqueue, &hdev->cmd_work);
 
 	return 0;
 }
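Every former tasklet_schedule() call now follows the same two-step producer pattern: append the skb to a queue, then kick the work item. queue_work() is a no-op when the work is already pending, so producers in any context can call it freely while the single consumer drains the queue in process context. Schematically (an illustrative sketch, not a literal excerpt):

	/* producer side - any context */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work); /* no-op if pending */

	/* consumer side - hci_cmd_work() in process context */
	while ((skb = skb_dequeue(&hdev->cmd_q))) {
		/* send one command, subject to hdev->cmd_cnt flow control */
	}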
@@ -2012,7 +2015,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
 
 	hci_queue_acl(conn, &chan->data_q, skb, flags);
 
-	tasklet_schedule(&hdev->tx_task);
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_acl);
 
@@ -2035,7 +2038,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
 	skb_queue_tail(&conn->data_q, skb);
-	tasklet_schedule(&hdev->tx_task);
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 EXPORT_SYMBOL(hci_send_sco);
 
@@ -2050,7 +2053,10 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 
 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
-	list_for_each_entry(c, &h->list, list) {
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;
 
@@ -2068,6 +2074,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 			break;
 	}
 
+	rcu_read_unlock();
+
 	if (conn) {
 		int cnt, q;
 
@@ -2103,14 +2111,18 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 
 	BT_ERR("%s link tx timeout", hdev->name);
 
+	rcu_read_lock();
+
 	/* Kill stalled connections */
-	list_for_each_entry(c, &h->list, list) {
+	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 					hdev->name, batostr(&c->dst));
 			hci_acl_disconn(c, 0x13);
 		}
 	}
+
+	rcu_read_unlock();
 }
 
 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
@@ -2124,8 +2136,9 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
 
 	BT_DBG("%s", hdev->name);
 
-	list_for_each_entry(conn, &h->list, list) {
-		struct hci_chan_hash *ch;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(conn, &h->list, list) {
 		struct hci_chan *tmp;
 
 		if (conn->type != type)
@@ -2136,9 +2149,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
 
 		conn_num++;
 
-		ch = &conn->chan_hash;
-
-		list_for_each_entry(tmp, &ch->list, list) {
+		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
 			struct sk_buff *skb;
 
 			if (skb_queue_empty(&tmp->data_q))
@@ -2166,6 +2177,8 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
 			break;
 	}
 
+	rcu_read_unlock();
+
 	if (!chan)
 		return NULL;
 
@@ -2199,8 +2212,9 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
 
 	BT_DBG("%s", hdev->name);
 
-	list_for_each_entry(conn, &h->list, list) {
-		struct hci_chan_hash *ch;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(conn, &h->list, list) {
 		struct hci_chan *chan;
 
 		if (conn->type != type)
@@ -2211,8 +2225,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
 
 		num++;
 
-		ch = &conn->chan_hash;
-		list_for_each_entry(chan, &ch->list, list) {
+		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
 			struct sk_buff *skb;
 
 			if (chan->sent) {
@@ -2236,6 +2249,9 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
 		if (hci_conn_num(hdev, type) == num)
 			break;
 	}
+
+	rcu_read_unlock();
+
 }
 
 static inline void hci_sched_acl(struct hci_dev *hdev)
@@ -2386,12 +2402,12 @@ static inline void hci_sched_le(struct hci_dev *hdev)
 		hci_prio_recalculate(hdev, LE_LINK);
 }
 
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
 	struct sk_buff *skb;
 
-	read_lock(&hci_task_lock);
+	mutex_lock(&hci_task_lock);
 
 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
 		hdev->sco_cnt, hdev->le_cnt);
@@ -2410,7 +2426,7 @@ static void hci_tx_task(unsigned long arg)
 	while ((skb = skb_dequeue(&hdev->raw_q)))
 		hci_send_frame(skb);
 
-	read_unlock(&hci_task_lock);
+	mutex_unlock(&hci_task_lock);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
@@ -2439,7 +2455,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 	if (conn) {
 		register struct hci_proto *hp;
 
-		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
 		/* Send to upper protocol */
 		hp = hci_proto[HCI_PROTO_L2CAP];
@@ -2491,14 +2507,14 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
 	struct sk_buff *skb;
 
 	BT_DBG("%s", hdev->name);
 
-	read_lock(&hci_task_lock);
+	mutex_lock(&hci_task_lock);
 
 	while ((skb = skb_dequeue(&hdev->rx_q))) {
 		if (atomic_read(&hdev->promisc)) {
@@ -2524,6 +2540,7 @@ static void hci_rx_task(unsigned long arg)
 		/* Process frame */
 		switch (bt_cb(skb)->pkt_type) {
 		case HCI_EVENT_PKT:
+			BT_DBG("%s Event packet", hdev->name);
 			hci_event_packet(hdev, skb);
 			break;
 
@@ -2543,12 +2560,12 @@
 		}
 	}
 
-	read_unlock(&hci_task_lock);
+	mutex_unlock(&hci_task_lock);
 }
 
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) arg;
+	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
 	struct sk_buff *skb;
 
 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2572,7 +2589,7 @@ static void hci_cmd_task(unsigned long arg)
 					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
 		} else {
 			skb_queue_head(&hdev->cmd_q, skb);
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 		}
 	}
 }
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 35cb56ed3b0b..fc5338fc2a6e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -378,11 +378,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
 
-	if (hdev->notify) {
-		tasklet_disable(&hdev->tx_task);
+	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
-		tasklet_enable(&hdev->tx_task);
-	}
 }
 
 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -409,11 +406,8 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
 
-	if (hdev->notify) {
-		tasklet_disable(&hdev->tx_task);
+	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
-		tasklet_enable(&hdev->tx_task);
-	}
 }
 
 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -773,6 +767,28 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
 }
 
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+	hdev->block_len = __le16_to_cpu(rp->block_len);
+	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
+	hdev->block_cnt = hdev->num_blocks;
+
+	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+					hdev->block_cnt, hdev->block_len);
+
+	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
+}
+
 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
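The new handler decodes the Read Data Block Size command-complete event, used for block-based ACL flow control (the accounting mode introduced for AMP controllers, where hdev->block_cnt plays the role hdev->acl_cnt plays in packet-based mode). Judging from the field accesses, the reply structure added to hci.h alongside this handler looks roughly like the following (reconstructed, not quoted from the patch; multi-byte fields are little-endian on the wire per the HCI spec):

	struct hci_rp_read_data_block_size {
		__u8	status;
		__le16	max_acl_len;
		__le16	block_len;
		__le16	num_blocks;
	} __packed;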
@@ -1017,7 +1033,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 	if (cp->enable == 0x01) {
 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
 
-		del_timer(&hdev->adv_timer);
+		cancel_delayed_work_sync(&hdev->adv_work);
 
 		hci_dev_lock(hdev);
 		hci_adv_entries_clear(hdev);
@@ -1025,7 +1041,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 	} else if (cp->enable == 0x00) {
 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
 
-		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+		cancel_delayed_work_sync(&hdev->adv_work);
+		queue_delayed_work(hdev->workqueue, &hdev->adv_work,
+						jiffies + ADV_CLEAR_TIMEOUT);
 	}
 }
 
@@ -2022,6 +2040,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_cc_read_bd_addr(hdev, skb);
 		break;
 
+	case HCI_OP_READ_DATA_BLOCK_SIZE:
+		hci_cc_read_data_block_size(hdev, skb);
+		break;
+
 	case HCI_OP_WRITE_CA_TIMEOUT:
 		hci_cc_write_ca_timeout(hdev, skb);
 		break;
@@ -2116,7 +2138,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	if (ev->ncmd) {
 		atomic_set(&hdev->cmd_cnt, 1);
 		if (!skb_queue_empty(&hdev->cmd_q))
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 	}
 }
 
@@ -2198,7 +2220,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
 		atomic_set(&hdev->cmd_cnt, 1);
 		if (!skb_queue_empty(&hdev->cmd_q))
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 	}
 }
 
@@ -2243,8 +2265,6 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		return;
 	}
 
-	tasklet_disable(&hdev->tx_task);
-
 	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
 		struct hci_conn *conn;
 		__u16 handle, count;
@@ -2253,34 +2273,43 @@
 		count = get_unaligned_le16(ptr++);
 
 		conn = hci_conn_hash_lookup_handle(hdev, handle);
-		if (conn) {
-			conn->sent -= count;
-
-			if (conn->type == ACL_LINK) {
+		if (!conn)
+			continue;
+
+		conn->sent -= count;
+
+		switch (conn->type) {
+		case ACL_LINK:
+			hdev->acl_cnt += count;
+			if (hdev->acl_cnt > hdev->acl_pkts)
+				hdev->acl_cnt = hdev->acl_pkts;
+			break;
+
+		case LE_LINK:
+			if (hdev->le_pkts) {
+				hdev->le_cnt += count;
+				if (hdev->le_cnt > hdev->le_pkts)
+					hdev->le_cnt = hdev->le_pkts;
+			} else {
 				hdev->acl_cnt += count;
 				if (hdev->acl_cnt > hdev->acl_pkts)
 					hdev->acl_cnt = hdev->acl_pkts;
-			} else if (conn->type == LE_LINK) {
-				if (hdev->le_pkts) {
-					hdev->le_cnt += count;
-					if (hdev->le_cnt > hdev->le_pkts)
-						hdev->le_cnt = hdev->le_pkts;
-				} else {
-					hdev->acl_cnt += count;
-					if (hdev->acl_cnt > hdev->acl_pkts)
-						hdev->acl_cnt = hdev->acl_pkts;
-				}
-			} else {
-				hdev->sco_cnt += count;
-				if (hdev->sco_cnt > hdev->sco_pkts)
-					hdev->sco_cnt = hdev->sco_pkts;
 			}
+			break;
+
+		case SCO_LINK:
+			hdev->sco_cnt += count;
+			if (hdev->sco_cnt > hdev->sco_pkts)
+				hdev->sco_cnt = hdev->sco_pkts;
+			break;
+
+		default:
+			BT_ERR("Unknown type %d conn %p", conn->type, conn);
+			break;
 		}
 	}
 
-	tasklet_schedule(&hdev->tx_task);
-
-	tasklet_enable(&hdev->tx_task);
+	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f6afe3d76a66..189a667c293b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_add(hdev, &bdaddr);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return err;
 }
@@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_blacklist_del(hdev, &bdaddr);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return err;
 }
@@ -343,8 +343,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
 		return -EINVAL;
 
-	if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
-		return -EINVAL;
+	if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+		if (!enable_mgmt)
+			return -EINVAL;
+		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
+	}
 
 	lock_sock(sk);
 
@@ -535,10 +538,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
 			skb_queue_tail(&hdev->raw_q, skb);
-			tasklet_schedule(&hdev->tx_task);
+			queue_work(hdev->workqueue, &hdev->tx_work);
 		} else {
 			skb_queue_tail(&hdev->cmd_q, skb);
-			tasklet_schedule(&hdev->cmd_task);
+			queue_work(hdev->workqueue, &hdev->cmd_work);
 		}
 	} else {
 		if (!capable(CAP_NET_RAW)) {
@@ -547,7 +550,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		skb_queue_tail(&hdev->raw_q, skb);
-		tasklet_schedule(&hdev->tx_task);
+		queue_work(hdev->workqueue, &hdev->tx_work);
 	}
 
 	err = len;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index c62d254a1379..521095614235 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -89,11 +89,35 @@ static struct device_type bt_link = {
 	.release = bt_link_release,
 };
 
-static void add_conn(struct work_struct *work)
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support move zombie device,
+ * so we should move the device before conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
+{
+	return !strncmp(dev_name(dev), "rfcomm", 6);
+}
+
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p", conn);
+
+	conn->dev.type = &bt_link;
+	conn->dev.class = bt_class;
+	conn->dev.parent = &hdev->dev;
+
+	device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
 {
-	struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
 	struct hci_dev *hdev = conn->hdev;
 
+	BT_DBG("conn %p", conn);
+
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
 	dev_set_drvdata(&conn->dev, conn);
@@ -106,19 +130,8 @@ static void add_conn(struct work_struct *work)
 	hci_dev_hold(hdev);
 }
 
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
-{
-	return !strncmp(dev_name(dev), "rfcomm", 6);
-}
-
-static void del_conn(struct work_struct *work)
+void hci_conn_del_sysfs(struct hci_conn *conn)
 {
-	struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
 	struct hci_dev *hdev = conn->hdev;
 
 	if (!device_is_registered(&conn->dev))
@@ -140,36 +153,6 @@ static void del_conn(struct work_struct *work)
 	hci_dev_put(hdev);
 }
 
-void hci_conn_init_sysfs(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("conn %p", conn);
-
-	conn->dev.type = &bt_link;
-	conn->dev.class = bt_class;
-	conn->dev.parent = &hdev->dev;
-
-	device_initialize(&conn->dev);
-
-	INIT_WORK(&conn->work_add, add_conn);
-	INIT_WORK(&conn->work_del, del_conn);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-	BT_DBG("conn %p", conn);
-
-	queue_work(conn->hdev->workqueue, &conn->work_add);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
-	BT_DBG("conn %p", conn);
-
-	queue_work(conn->hdev->workqueue, &conn->work_del);
-}
-
 static inline char *host_bustostr(int bus)
 {
 	switch (bus) {
@@ -403,7 +386,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
 	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_entry *e;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	for (e = cache->list; e; e = e->next) {
 		struct inquiry_data *data = &e->data;
@@ -416,7 +399,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
 			   data->rssi, data->ssp_mode, e->timestamp);
 	}
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -438,12 +421,12 @@ static int blacklist_show(struct seq_file *f, void *p)
 	struct hci_dev *hdev = f->private;
 	struct bdaddr_list *b;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	list_for_each_entry(b, &hdev->blacklist, list)
 		seq_printf(f, "%s\n", batostr(&b->bdaddr));
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -482,12 +465,12 @@ static int uuids_show(struct seq_file *f, void *p)
 	struct hci_dev *hdev = f->private;
 	struct bt_uuid *uuid;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	list_for_each_entry(uuid, &hdev->uuids, list)
 		print_bt_uuid(f, uuid->uuid);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -508,11 +491,11 @@ static int auto_accept_delay_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->auto_accept_delay = val;
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
@@ -521,11 +504,11 @@ static int auto_accept_delay_get(void *data, u64 *val)
 {
 	struct hci_dev *hdev = data;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	*val = hdev->auto_accept_delay;
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	return 0;
 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3c2d888925d7..d478be11d562 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -795,11 +795,11 @@ static struct hci_conn *hidp_get_connection(struct hidp_session *session)
 	if (!hdev)
 		return NULL;
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (conn)
 		hci_conn_hold_device(conn);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 
 	hci_dev_put(hdev);
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 014fdec17113..ffa2f6b8408f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3,6 +3,7 @@
    Copyright (C) 2000-2001 Qualcomm Incorporated
    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
    Copyright (C) 2010 Google Inc.
+   Copyright (C) 2011 ProFUSION Embedded Systems
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -89,24 +90,36 @@ static inline void chan_put(struct l2cap_chan *c)
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->dcid == cid)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->dcid == cid) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
+
+	rcu_read_unlock();
+	return r;
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->scid == cid)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->scid == cid) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
+
+	rcu_read_unlock();
+	return r;
 }
 
 /* Find channel with given SCID.
@@ -115,34 +128,36 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
 {
 	struct l2cap_chan *c;
 
-	read_lock(&conn->chan_lock);
 	c = __l2cap_get_chan_by_scid(conn, cid);
 	if (c)
-		bh_lock_sock(c->sk);
-	read_unlock(&conn->chan_lock);
+		lock_sock(c->sk);
 	return c;
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
 {
-	struct l2cap_chan *c;
+	struct l2cap_chan *c, *r = NULL;
 
-	list_for_each_entry(c, &conn->chan_l, list) {
-		if (c->ident == ident)
-			return c;
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &conn->chan_l, list) {
+		if (c->ident == ident) {
+			r = c;
+			break;
+		}
 	}
-	return NULL;
+
+	rcu_read_unlock();
+	return r;
 }
 
 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
 {
 	struct l2cap_chan *c;
 
-	read_lock(&conn->chan_lock);
 	c = __l2cap_get_chan_by_ident(conn, ident);
 	if (c)
-		bh_lock_sock(c->sk);
-	read_unlock(&conn->chan_lock);
+		lock_sock(c->sk);
 	return c;
 }
 
@@ -213,20 +228,18 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
213 return 0; 228 return 0;
214} 229}
215 230
216static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) 231static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
217{ 232{
218 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout); 233 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
219 234
220 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) 235 cancel_delayed_work_sync(work);
221 chan_hold(chan); 236
237 schedule_delayed_work(work, timeout);
222} 238}
223 239
224static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer) 240static void l2cap_clear_timer(struct delayed_work *work)
225{ 241{
226 BT_DBG("chan %p state %d", chan, chan->state); 242 cancel_delayed_work_sync(work);
227
228 if (timer_pending(timer) && del_timer(timer))
229 chan_put(chan);
230} 243}
231 244
232static char *state_to_string(int state) 245static char *state_to_string(int state)
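l2cap_set_timer()/l2cap_clear_timer() now drive a struct delayed_work instead of a struct timer_list, which moves the expiry handlers from softirq into process context. A minimal sketch of the idiom (illustrative names, assumes a kernel build):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo {
	struct delayed_work timeout;
};

static void foo_timeout(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .work step */
	struct foo *f = container_of(work, struct foo, timeout.work);

	/* process context: sleeping is allowed here */
	(void)f;
}

static void foo_init(struct foo *f)
{
	INIT_DELAYED_WORK(&f->timeout, foo_timeout);
}

static void foo_rearm(struct foo *f, unsigned long msecs)
{
	/* cancel_delayed_work_sync() can sleep, so callers must not
	 * hold a spinlock; that is the cost of leaving softirq context */
	cancel_delayed_work_sync(&f->timeout);
	schedule_delayed_work(&f->timeout, msecs_to_jiffies(msecs));
}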
@@ -264,23 +277,16 @@ static void l2cap_state_change(struct l2cap_chan *chan, int state)
264 chan->ops->state_change(chan->data, state); 277 chan->ops->state_change(chan->data, state);
265} 278}
266 279
267static void l2cap_chan_timeout(unsigned long arg) 280static void l2cap_chan_timeout(struct work_struct *work)
268{ 281{
269 struct l2cap_chan *chan = (struct l2cap_chan *) arg; 282 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
283 chan_timer.work);
270 struct sock *sk = chan->sk; 284 struct sock *sk = chan->sk;
271 int reason; 285 int reason;
272 286
273 BT_DBG("chan %p state %d", chan, chan->state); 287 BT_DBG("chan %p state %d", chan, chan->state);
274 288
275 bh_lock_sock(sk); 289 lock_sock(sk);
276
277 if (sock_owned_by_user(sk)) {
278 /* sk is owned by user. Try again later */
279 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
280 bh_unlock_sock(sk);
281 chan_put(chan);
282 return;
283 }
284 290
285 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 291 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
286 reason = ECONNREFUSED; 292 reason = ECONNREFUSED;
@@ -292,7 +298,7 @@ static void l2cap_chan_timeout(unsigned long arg)
292 298
293 l2cap_chan_close(chan, reason); 299 l2cap_chan_close(chan, reason);
294 300
295 bh_unlock_sock(sk); 301 release_sock(sk);
296 302
297 chan->ops->close(chan->data); 303 chan->ops->close(chan->data);
298 chan_put(chan); 304 chan_put(chan);
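The conversion also explains why the sock_owned_by_user() retry branch above could be dropped: in process context lock_sock() simply sleeps until a userspace lock holder is done, which the old softirq handler could not. Schematically (illustrative):

#include <net/sock.h>

/* old timer handler (softirq): must not sleep, so re-arm and bail */
static void old_style(struct sock *sk)
{
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* socket busy in user context: retry on a later tick */
		bh_unlock_sock(sk);
		return;
	}
	/* ... actual timeout handling ... */
	bh_unlock_sock(sk);
}

/* new work handler (process context): simply wait for the lock */
static void new_style(struct sock *sk)
{
	lock_sock(sk);
	/* ... actual timeout handling, may sleep ... */
	release_sock(sk);
}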
@@ -312,7 +318,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
312 list_add(&chan->global_l, &chan_list); 318 list_add(&chan->global_l, &chan_list);
313 write_unlock_bh(&chan_list_lock); 319 write_unlock_bh(&chan_list_lock);
314 320
315 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan); 321 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
316 322
317 chan->state = BT_OPEN; 323 chan->state = BT_OPEN;
318 324
@@ -332,7 +338,7 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
332 chan_put(chan); 338 chan_put(chan);
333} 339}
334 340
335static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 341static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
336{ 342{
337 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 343 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
338 chan->psm, chan->dcid); 344 chan->psm, chan->dcid);
@@ -373,7 +379,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
373 379
374 chan_hold(chan); 380 chan_hold(chan);
375 381
376 list_add(&chan->list, &conn->chan_l); 382 list_add_rcu(&chan->list, &conn->chan_l);
377} 383}
378 384
379/* Delete channel. 385/* Delete channel.
@@ -390,9 +396,9 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
390 396
391 if (conn) { 397 if (conn) {
392 /* Delete from channel list */ 398 /* Delete from channel list */
393 write_lock_bh(&conn->chan_lock); 399 list_del_rcu(&chan->list);
394 list_del(&chan->list); 400 synchronize_rcu();
395 write_unlock_bh(&conn->chan_lock); 401
396 chan_put(chan); 402 chan_put(chan);
397 403
398 chan->conn = NULL; 404 chan->conn = NULL;
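On the removal side, list_del_rcu() plus synchronize_rcu() is what keeps the lock-free readers above safe. A sketch of the writer half (illustrative; assumes a kernel build):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

static void chan_unlink(struct list_head *entry)
{
	/* unlink: no new reader can find the entry after this */
	list_del_rcu(entry);

	/* wait out every rcu_read_lock() section that may still hold
	 * a pointer to it; sleeps, so process context is required */
	synchronize_rcu();

	/* now the final reference can be dropped and the entry freed */
}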
@@ -707,7 +713,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
707 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 713 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
708 conn->info_ident = l2cap_get_ident(conn); 714 conn->info_ident = l2cap_get_ident(conn);
709 715
710 mod_timer(&conn->info_timer, jiffies + 716 schedule_delayed_work(&conn->info_work,
711 msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); 717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
712 718
713 l2cap_send_cmd(conn, conn->info_ident, 719 l2cap_send_cmd(conn, conn->info_ident,
@@ -759,13 +765,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
759/* ---- L2CAP connections ---- */ 765/* ---- L2CAP connections ---- */
760static void l2cap_conn_start(struct l2cap_conn *conn) 766static void l2cap_conn_start(struct l2cap_conn *conn)
761{ 767{
762 struct l2cap_chan *chan, *tmp; 768 struct l2cap_chan *chan;
763 769
764 BT_DBG("conn %p", conn); 770 BT_DBG("conn %p", conn);
765 771
766 read_lock(&conn->chan_lock); 772 rcu_read_lock();
767 773
768 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { 774 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
769 struct sock *sk = chan->sk; 775 struct sock *sk = chan->sk;
770 776
771 bh_lock_sock(sk); 777 bh_lock_sock(sk);
@@ -789,9 +795,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
789 &chan->conf_state)) { 795 &chan->conf_state)) {
790 /* l2cap_chan_close() calls list_del(chan) 796 /* l2cap_chan_close() calls list_del(chan)
791 * so release the lock */ 797 * so release the lock */
792 read_unlock(&conn->chan_lock);
793 l2cap_chan_close(chan, ECONNRESET); 798 l2cap_chan_close(chan, ECONNRESET);
794 read_lock(&conn->chan_lock);
795 bh_unlock_sock(sk); 799 bh_unlock_sock(sk);
796 continue; 800 continue;
797 } 801 }
@@ -847,7 +851,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
847 bh_unlock_sock(sk); 851 bh_unlock_sock(sk);
848 } 852 }
849 853
850 read_unlock(&conn->chan_lock); 854 rcu_read_unlock();
851} 855}
852 856
853/* Find socket with cid and source bdaddr. 857/* Find socket with cid and source bdaddr.
@@ -898,7 +902,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
898 902
899 parent = pchan->sk; 903 parent = pchan->sk;
900 904
901 bh_lock_sock(parent); 905 lock_sock(parent);
902 906
903 /* Check for backlog size */ 907 /* Check for backlog size */
904 if (sk_acceptq_is_full(parent)) { 908 if (sk_acceptq_is_full(parent)) {
@@ -912,8 +916,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
912 916
913 sk = chan->sk; 917 sk = chan->sk;
914 918
915 write_lock_bh(&conn->chan_lock);
916
917 hci_conn_hold(conn->hcon); 919 hci_conn_hold(conn->hcon);
918 920
919 bacpy(&bt_sk(sk)->src, conn->src); 921 bacpy(&bt_sk(sk)->src, conn->src);
@@ -921,17 +923,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
921 923
922 bt_accept_enqueue(parent, sk); 924 bt_accept_enqueue(parent, sk);
923 925
924 __l2cap_chan_add(conn, chan); 926 l2cap_chan_add(conn, chan);
925 927
926 __set_chan_timer(chan, sk->sk_sndtimeo); 928 __set_chan_timer(chan, sk->sk_sndtimeo);
927 929
928 l2cap_state_change(chan, BT_CONNECTED); 930 l2cap_state_change(chan, BT_CONNECTED);
929 parent->sk_data_ready(parent, 0); 931 parent->sk_data_ready(parent, 0);
930 932
931 write_unlock_bh(&conn->chan_lock);
932
933clean: 933clean:
934 bh_unlock_sock(parent); 934 release_sock(parent);
935} 935}
936 936
937static void l2cap_chan_ready(struct sock *sk) 937static void l2cap_chan_ready(struct sock *sk)
@@ -963,9 +963,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
963 if (conn->hcon->out && conn->hcon->type == LE_LINK) 963 if (conn->hcon->out && conn->hcon->type == LE_LINK)
964 smp_conn_security(conn, conn->hcon->pending_sec_level); 964 smp_conn_security(conn, conn->hcon->pending_sec_level);
965 965
966 read_lock(&conn->chan_lock); 966 rcu_read_lock();
967 967
968 list_for_each_entry(chan, &conn->chan_l, list) { 968 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
969 struct sock *sk = chan->sk; 969 struct sock *sk = chan->sk;
970 970
971 bh_lock_sock(sk); 971 bh_lock_sock(sk);
@@ -985,7 +985,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
985 bh_unlock_sock(sk); 985 bh_unlock_sock(sk);
986 } 986 }
987 987
988 read_unlock(&conn->chan_lock); 988 rcu_read_unlock();
989} 989}
990 990
991/* Notify sockets that we cannot guarantee reliability anymore */ 991/* Notify sockets that we cannot guarantee reliability anymore */
@@ -995,21 +995,22 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
995 995
996 BT_DBG("conn %p", conn); 996 BT_DBG("conn %p", conn);
997 997
998 read_lock(&conn->chan_lock); 998 rcu_read_lock();
999 999
1000 list_for_each_entry(chan, &conn->chan_l, list) { 1000 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1001 struct sock *sk = chan->sk; 1001 struct sock *sk = chan->sk;
1002 1002
1003 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) 1003 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1004 sk->sk_err = err; 1004 sk->sk_err = err;
1005 } 1005 }
1006 1006
1007 read_unlock(&conn->chan_lock); 1007 rcu_read_unlock();
1008} 1008}
1009 1009
1010static void l2cap_info_timeout(unsigned long arg) 1010static void l2cap_info_timeout(struct work_struct *work)
1011{ 1011{
1012 struct l2cap_conn *conn = (void *) arg; 1012 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1013 info_work.work);
1013 1014
1014 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 1015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1015 conn->info_ident = 0; 1016 conn->info_ident = 0;
@@ -1033,16 +1034,16 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1033 /* Kill channels */ 1034 /* Kill channels */
1034 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1035 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1035 sk = chan->sk; 1036 sk = chan->sk;
1036 bh_lock_sock(sk); 1037 lock_sock(sk);
1037 l2cap_chan_del(chan, err); 1038 l2cap_chan_del(chan, err);
1038 bh_unlock_sock(sk); 1039 release_sock(sk);
1039 chan->ops->close(chan->data); 1040 chan->ops->close(chan->data);
1040 } 1041 }
1041 1042
1042 hci_chan_del(conn->hchan); 1043 hci_chan_del(conn->hchan);
1043 1044
1044 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1045 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1045 del_timer_sync(&conn->info_timer); 1046 cancel_delayed_work_sync(&conn->info_work);
1046 1047
1047 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { 1048 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1048 del_timer(&conn->security_timer); 1049 del_timer(&conn->security_timer);
@@ -1095,7 +1096,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1095 conn->feat_mask = 0; 1096 conn->feat_mask = 0;
1096 1097
1097 spin_lock_init(&conn->lock); 1098 spin_lock_init(&conn->lock);
1098 rwlock_init(&conn->chan_lock);
1099 1099
1100 INIT_LIST_HEAD(&conn->chan_l); 1100 INIT_LIST_HEAD(&conn->chan_l);
1101 1101
@@ -1103,21 +1103,13 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1103 setup_timer(&conn->security_timer, security_timeout, 1103 setup_timer(&conn->security_timer, security_timeout,
1104 (unsigned long) conn); 1104 (unsigned long) conn);
1105 else 1105 else
1106 setup_timer(&conn->info_timer, l2cap_info_timeout, 1106 INIT_DELAYED_WORK(&conn->info_work, l2cap_info_timeout);
1107 (unsigned long) conn);
1108 1107
1109 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 1108 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1110 1109
1111 return conn; 1110 return conn;
1112} 1111}
1113 1112
1114static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1115{
1116 write_lock_bh(&conn->chan_lock);
1117 __l2cap_chan_add(conn, chan);
1118 write_unlock_bh(&conn->chan_lock);
1119}
1120
1121/* ---- Socket interface ---- */ 1113/* ---- Socket interface ---- */
1122 1114
1123/* Find socket with psm and source bdaddr. 1115/* Find socket with psm and source bdaddr.
@@ -1153,11 +1145,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
1153 return c1; 1145 return c1;
1154} 1146}
1155 1147
1156int l2cap_chan_connect(struct l2cap_chan *chan) 1148inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1157{ 1149{
1158 struct sock *sk = chan->sk; 1150 struct sock *sk = chan->sk;
1159 bdaddr_t *src = &bt_sk(sk)->src; 1151 bdaddr_t *src = &bt_sk(sk)->src;
1160 bdaddr_t *dst = &bt_sk(sk)->dst;
1161 struct l2cap_conn *conn; 1152 struct l2cap_conn *conn;
1162 struct hci_conn *hcon; 1153 struct hci_conn *hcon;
1163 struct hci_dev *hdev; 1154 struct hci_dev *hdev;
@@ -1171,7 +1162,62 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
1171 if (!hdev) 1162 if (!hdev)
1172 return -EHOSTUNREACH; 1163 return -EHOSTUNREACH;
1173 1164
1174 hci_dev_lock_bh(hdev); 1165 hci_dev_lock(hdev);
1166
1167 lock_sock(sk);
1168
1169 /* PSM must be odd and lsb of upper byte must be 0 */
1170 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1171 chan->chan_type != L2CAP_CHAN_RAW) {
1172 err = -EINVAL;
1173 goto done;
1174 }
1175
1176 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1177 err = -EINVAL;
1178 goto done;
1179 }
1180
1181 switch (chan->mode) {
1182 case L2CAP_MODE_BASIC:
1183 break;
1184 case L2CAP_MODE_ERTM:
1185 case L2CAP_MODE_STREAMING:
1186 if (!disable_ertm)
1187 break;
1188 /* fall through */
1189 default:
1190 err = -ENOTSUPP;
1191 goto done;
1192 }
1193
1194 switch (sk->sk_state) {
1195 case BT_CONNECT:
1196 case BT_CONNECT2:
1197 case BT_CONFIG:
1198 /* Already connecting */
1199 err = 0;
1200 goto done;
1201
1202 case BT_CONNECTED:
1203 /* Already connected */
1204 err = -EISCONN;
1205 goto done;
1206
1207 case BT_OPEN:
1208 case BT_BOUND:
1209 /* Can connect */
1210 break;
1211
1212 default:
1213 err = -EBADFD;
1214 goto done;
1215 }
1216
1217 /* Set destination address and psm */
1218 bacpy(&bt_sk(sk)->dst, dst);
1219 chan->psm = psm;
1220 chan->dcid = cid;
1175 1221
1176 auth_type = l2cap_get_auth_type(chan); 1222 auth_type = l2cap_get_auth_type(chan);
1177 1223
@@ -1214,7 +1260,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
1214 err = 0; 1260 err = 0;
1215 1261
1216done: 1262done:
1217 hci_dev_unlock_bh(hdev); 1263 hci_dev_unlock(hdev);
1218 hci_dev_put(hdev); 1264 hci_dev_put(hdev);
1219 return err; 1265 return err;
1220} 1266}
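The PSM sanity check folded into l2cap_chan_connect() above packs two spec rules into one mask: a valid PSM has an odd least-significant octet and an even most-significant octet, so psm & 0x0101 must equal exactly 0x0001. A worked version (illustrative helper, host byte order):

#include <linux/types.h>

static bool l2cap_psm_is_valid(u16 psm)
{
	return (psm & 0x0101) == 0x0001;
}

/*
 * 0x0001 (SDP)     -> valid   (low byte odd, high byte even)
 * 0x0003 (RFCOMM)  -> valid
 * 0x0002           -> invalid (low byte even)
 * 0x0101           -> invalid (high byte odd)
 */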
@@ -1251,17 +1297,18 @@ int __l2cap_wait_ack(struct sock *sk)
1251 return err; 1297 return err;
1252} 1298}
1253 1299
1254static void l2cap_monitor_timeout(unsigned long arg) 1300static void l2cap_monitor_timeout(struct work_struct *work)
1255{ 1301{
1256 struct l2cap_chan *chan = (void *) arg; 1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 monitor_timer.work);
1257 struct sock *sk = chan->sk; 1304 struct sock *sk = chan->sk;
1258 1305
1259 BT_DBG("chan %p", chan); 1306 BT_DBG("chan %p", chan);
1260 1307
1261 bh_lock_sock(sk); 1308 lock_sock(sk);
1262 if (chan->retry_count >= chan->remote_max_tx) { 1309 if (chan->retry_count >= chan->remote_max_tx) {
1263 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1264 bh_unlock_sock(sk); 1311 release_sock(sk);
1265 return; 1312 return;
1266 } 1313 }
1267 1314
@@ -1269,24 +1316,25 @@ static void l2cap_monitor_timeout(unsigned long arg)
1269 __set_monitor_timer(chan); 1316 __set_monitor_timer(chan);
1270 1317
1271 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1272 bh_unlock_sock(sk); 1319 release_sock(sk);
1273} 1320}
1274 1321
1275static void l2cap_retrans_timeout(unsigned long arg) 1322static void l2cap_retrans_timeout(struct work_struct *work)
1276{ 1323{
1277 struct l2cap_chan *chan = (void *) arg; 1324 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1325 retrans_timer.work);
1278 struct sock *sk = chan->sk; 1326 struct sock *sk = chan->sk;
1279 1327
1280 BT_DBG("chan %p", chan); 1328 BT_DBG("chan %p", chan);
1281 1329
1282 bh_lock_sock(sk); 1330 lock_sock(sk);
1283 chan->retry_count = 1; 1331 chan->retry_count = 1;
1284 __set_monitor_timer(chan); 1332 __set_monitor_timer(chan);
1285 1333
1286 set_bit(CONN_WAIT_F, &chan->conn_state); 1334 set_bit(CONN_WAIT_F, &chan->conn_state);
1287 1335
1288 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1336 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1289 bh_unlock_sock(sk); 1337 release_sock(sk);
1290} 1338}
1291 1339
1292static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1340static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
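These two handlers drive the ERTM supervisory poll: a retransmission timeout sends an RR/RNR S-frame with the Poll bit set and arms the monitor timer, and each monitor expiry repeats the poll until remote_max_tx attempts have failed. A compact sketch of that loop (illustrative, not the in-tree frame encoding):

#include <linux/types.h>

struct poll_state {
	unsigned int retry_count;
	unsigned int max_tx;
};

/* returns false once the peer is deemed dead and the link
 * should be disconnected */
static bool poll_tick(struct poll_state *p, bool first_expiry)
{
	if (!first_expiry && p->retry_count >= p->max_tx)
		return false;

	p->retry_count = first_expiry ? 1 : p->retry_count + 1;
	/* send RR or RNR with the Poll bit; re-arm the monitor timer */
	return true;
}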
@@ -1778,8 +1826,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1778 1826
1779 BT_DBG("conn %p", conn); 1827 BT_DBG("conn %p", conn);
1780 1828
1781 read_lock(&conn->chan_lock); 1829 rcu_read_lock();
1782 list_for_each_entry(chan, &conn->chan_l, list) { 1830
1831 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1783 struct sock *sk = chan->sk; 1832 struct sock *sk = chan->sk;
1784 if (chan->chan_type != L2CAP_CHAN_RAW) 1833 if (chan->chan_type != L2CAP_CHAN_RAW)
1785 continue; 1834 continue;
@@ -1794,7 +1843,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1794 if (chan->ops->recv(chan->data, nskb)) 1843 if (chan->ops->recv(chan->data, nskb))
1795 kfree_skb(nskb); 1844 kfree_skb(nskb);
1796 } 1845 }
1797 read_unlock(&conn->chan_lock); 1846
1847 rcu_read_unlock();
1798} 1848}
1799 1849
1800/* ---- L2CAP signalling commands ---- */ 1850/* ---- L2CAP signalling commands ---- */
@@ -1955,37 +2005,31 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1955 (unsigned long) &efs); 2005 (unsigned long) &efs);
1956} 2006}
1957 2007
1958static void l2cap_ack_timeout(unsigned long arg) 2008static void l2cap_ack_timeout(struct work_struct *work)
1959{ 2009{
1960 struct l2cap_chan *chan = (void *) arg; 2010 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2011 ack_timer.work);
1961 2012
1962 bh_lock_sock(chan->sk); 2013 lock_sock(chan->sk);
1963 l2cap_send_ack(chan); 2014 l2cap_send_ack(chan);
1964 bh_unlock_sock(chan->sk); 2015 release_sock(chan->sk);
1965} 2016}
1966 2017
1967static inline void l2cap_ertm_init(struct l2cap_chan *chan) 2018static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1968{ 2019{
1969 struct sock *sk = chan->sk;
1970
1971 chan->expected_ack_seq = 0; 2020 chan->expected_ack_seq = 0;
1972 chan->unacked_frames = 0; 2021 chan->unacked_frames = 0;
1973 chan->buffer_seq = 0; 2022 chan->buffer_seq = 0;
1974 chan->num_acked = 0; 2023 chan->num_acked = 0;
1975 chan->frames_sent = 0; 2024 chan->frames_sent = 0;
1976 2025
1977 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout, 2026 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1978 (unsigned long) chan); 2027 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1979 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout, 2028 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1980 (unsigned long) chan);
1981 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1982 2029
1983 skb_queue_head_init(&chan->srej_q); 2030 skb_queue_head_init(&chan->srej_q);
1984 2031
1985 INIT_LIST_HEAD(&chan->srej_l); 2032 INIT_LIST_HEAD(&chan->srej_l);
1986
1987
1988 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1989} 2033}
1990 2034
1991static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2035static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2372,7 +2416,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2372 void *ptr = req->data; 2416 void *ptr = req->data;
2373 int type, olen; 2417 int type, olen;
2374 unsigned long val; 2418 unsigned long val;
2375 struct l2cap_conf_rfc rfc; 2419 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2376 struct l2cap_conf_efs efs; 2420 struct l2cap_conf_efs efs;
2377 2421
2378 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); 2422 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2522,6 +2566,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2522 } 2566 }
2523 } 2567 }
2524 2568
2569 /* Use sane default values in case a misbehaving remote device
2570 * did not send an RFC option.
2571 */
2572 rfc.mode = chan->mode;
2573 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2574 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2575 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2576
2577 BT_ERR("Expected RFC option was not found, using defaults");
2578
2525done: 2579done:
2526 switch (rfc.mode) { 2580 switch (rfc.mode) {
2527 case L2CAP_MODE_ERTM: 2581 case L2CAP_MODE_ERTM:
@@ -2543,7 +2597,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
2543 2597
2544 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && 2598 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2545 cmd->ident == conn->info_ident) { 2599 cmd->ident == conn->info_ident) {
2546 del_timer(&conn->info_timer); 2600 cancel_delayed_work_sync(&conn->info_work);
2547 2601
2548 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 2602 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2549 conn->info_ident = 0; 2603 conn->info_ident = 0;
@@ -2576,7 +2630,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2576 2630
2577 parent = pchan->sk; 2631 parent = pchan->sk;
2578 2632
2579 bh_lock_sock(parent); 2633 lock_sock(parent);
2580 2634
2581 /* Check if the ACL is secure enough (if not SDP) */ 2635 /* Check if the ACL is secure enough (if not SDP) */
2582 if (psm != cpu_to_le16(0x0001) && 2636 if (psm != cpu_to_le16(0x0001) &&
@@ -2600,11 +2654,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2600 2654
2601 sk = chan->sk; 2655 sk = chan->sk;
2602 2656
2603 write_lock_bh(&conn->chan_lock);
2604
2605 /* Check if we already have channel with that dcid */ 2657 /* Check if we already have channel with that dcid */
2606 if (__l2cap_get_chan_by_dcid(conn, scid)) { 2658 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2607 write_unlock_bh(&conn->chan_lock);
2608 sock_set_flag(sk, SOCK_ZAPPED); 2659 sock_set_flag(sk, SOCK_ZAPPED);
2609 chan->ops->close(chan->data); 2660 chan->ops->close(chan->data);
2610 goto response; 2661 goto response;
@@ -2619,7 +2670,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2619 2670
2620 bt_accept_enqueue(parent, sk); 2671 bt_accept_enqueue(parent, sk);
2621 2672
2622 __l2cap_chan_add(conn, chan); 2673 l2cap_chan_add(conn, chan);
2623 2674
2624 dcid = chan->scid; 2675 dcid = chan->scid;
2625 2676
@@ -2650,10 +2701,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2650 status = L2CAP_CS_NO_INFO; 2701 status = L2CAP_CS_NO_INFO;
2651 } 2702 }
2652 2703
2653 write_unlock_bh(&conn->chan_lock);
2654
2655response: 2704response:
2656 bh_unlock_sock(parent); 2705 release_sock(parent);
2657 2706
2658sendresp: 2707sendresp:
2659 rsp.scid = cpu_to_le16(scid); 2708 rsp.scid = cpu_to_le16(scid);
@@ -2669,7 +2718,7 @@ sendresp:
2669 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 2718 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2670 conn->info_ident = l2cap_get_ident(conn); 2719 conn->info_ident = l2cap_get_ident(conn);
2671 2720
2672 mod_timer(&conn->info_timer, jiffies + 2721 schedule_delayed_work(&conn->info_work,
2673 msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); 2722 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2674 2723
2675 l2cap_send_cmd(conn, conn->info_ident, 2724 l2cap_send_cmd(conn, conn->info_ident,
@@ -2735,19 +2784,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2735 break; 2784 break;
2736 2785
2737 default: 2786 default:
2738 /* don't delete l2cap channel if sk is owned by user */
2739 if (sock_owned_by_user(sk)) {
2740 l2cap_state_change(chan, BT_DISCONN);
2741 __clear_chan_timer(chan);
2742 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2743 break;
2744 }
2745
2746 l2cap_chan_del(chan, ECONNREFUSED); 2787 l2cap_chan_del(chan, ECONNREFUSED);
2747 break; 2788 break;
2748 } 2789 }
2749 2790
2750 bh_unlock_sock(sk); 2791 release_sock(sk);
2751 return 0; 2792 return 0;
2752} 2793}
2753 2794
@@ -2869,7 +2910,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2869 } 2910 }
2870 2911
2871unlock: 2912unlock:
2872 bh_unlock_sock(sk); 2913 release_sock(sk);
2873 return 0; 2914 return 0;
2874} 2915}
2875 2916
@@ -2976,7 +3017,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2976 } 3017 }
2977 3018
2978done: 3019done:
2979 bh_unlock_sock(sk); 3020 release_sock(sk);
2980 return 0; 3021 return 0;
2981} 3022}
2982 3023
@@ -3005,17 +3046,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3005 3046
3006 sk->sk_shutdown = SHUTDOWN_MASK; 3047 sk->sk_shutdown = SHUTDOWN_MASK;
3007 3048
3008 /* don't delete l2cap channel if sk is owned by user */
3009 if (sock_owned_by_user(sk)) {
3010 l2cap_state_change(chan, BT_DISCONN);
3011 __clear_chan_timer(chan);
3012 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3013 bh_unlock_sock(sk);
3014 return 0;
3015 }
3016
3017 l2cap_chan_del(chan, ECONNRESET); 3049 l2cap_chan_del(chan, ECONNRESET);
3018 bh_unlock_sock(sk); 3050 release_sock(sk);
3019 3051
3020 chan->ops->close(chan->data); 3052 chan->ops->close(chan->data);
3021 return 0; 3053 return 0;
@@ -3039,17 +3071,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3039 3071
3040 sk = chan->sk; 3072 sk = chan->sk;
3041 3073
3042 /* don't delete l2cap channel if sk is owned by user */
3043 if (sock_owned_by_user(sk)) {
3044 l2cap_state_change(chan, BT_DISCONN);
3045 __clear_chan_timer(chan);
3046 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3047 bh_unlock_sock(sk);
3048 return 0;
3049 }
3050
3051 l2cap_chan_del(chan, 0); 3074 l2cap_chan_del(chan, 0);
3052 bh_unlock_sock(sk); 3075 release_sock(sk);
3053 3076
3054 chan->ops->close(chan->data); 3077 chan->ops->close(chan->data);
3055 return 0; 3078 return 0;
@@ -3120,7 +3143,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3120 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) 3143 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3121 return 0; 3144 return 0;
3122 3145
3123 del_timer(&conn->info_timer); 3146 cancel_delayed_work_sync(&conn->info_work);
3124 3147
3125 if (result != L2CAP_IR_SUCCESS) { 3148 if (result != L2CAP_IR_SUCCESS) {
3126 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3149 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -4237,12 +4260,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4237 break; 4260 break;
4238 4261
4239 case L2CAP_MODE_ERTM: 4262 case L2CAP_MODE_ERTM:
4240 if (!sock_owned_by_user(sk)) { 4263 l2cap_ertm_data_rcv(sk, skb);
4241 l2cap_ertm_data_rcv(sk, skb);
4242 } else {
4243 if (sk_add_backlog(sk, skb))
4244 goto drop;
4245 }
4246 4264
4247 goto done; 4265 goto done;
4248 4266
@@ -4292,7 +4310,7 @@ drop:
4292 4310
4293done: 4311done:
4294 if (sk) 4312 if (sk)
4295 bh_unlock_sock(sk); 4313 release_sock(sk);
4296 4314
4297 return 0; 4315 return 0;
4298} 4316}
@@ -4308,7 +4326,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4308 4326
4309 sk = chan->sk; 4327 sk = chan->sk;
4310 4328
4311 bh_lock_sock(sk); 4329 lock_sock(sk);
4312 4330
4313 BT_DBG("sk %p, len %d", sk, skb->len); 4331 BT_DBG("sk %p, len %d", sk, skb->len);
4314 4332
@@ -4326,7 +4344,7 @@ drop:
4326 4344
4327done: 4345done:
4328 if (sk) 4346 if (sk)
4329 bh_unlock_sock(sk); 4347 release_sock(sk);
4330 return 0; 4348 return 0;
4331} 4349}
4332 4350
@@ -4341,7 +4359,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
4341 4359
4342 sk = chan->sk; 4360 sk = chan->sk;
4343 4361
4344 bh_lock_sock(sk); 4362 lock_sock(sk);
4345 4363
4346 BT_DBG("sk %p, len %d", sk, skb->len); 4364 BT_DBG("sk %p, len %d", sk, skb->len);
4347 4365
@@ -4359,7 +4377,7 @@ drop:
4359 4377
4360done: 4378done:
4361 if (sk) 4379 if (sk)
4362 bh_unlock_sock(sk); 4380 release_sock(sk);
4363 return 0; 4381 return 0;
4364} 4382}
4365 4383
@@ -4518,9 +4536,9 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4518 del_timer(&conn->security_timer); 4536 del_timer(&conn->security_timer);
4519 } 4537 }
4520 4538
4521 read_lock(&conn->chan_lock); 4539 rcu_read_lock();
4522 4540
4523 list_for_each_entry(chan, &conn->chan_l, list) { 4541 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4524 struct sock *sk = chan->sk; 4542 struct sock *sk = chan->sk;
4525 4543
4526 bh_lock_sock(sk); 4544 bh_lock_sock(sk);
@@ -4598,7 +4616,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4598 bh_unlock_sock(sk); 4616 bh_unlock_sock(sk);
4599 } 4617 }
4600 4618
4601 read_unlock(&conn->chan_lock); 4619 rcu_read_unlock();
4602 4620
4603 return 0; 4621 return 0;
4604} 4622}
@@ -4664,11 +4682,11 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
4664 BT_ERR("Frame exceeding recv MTU (len %d, " 4682 BT_ERR("Frame exceeding recv MTU (len %d, "
4665 "MTU %d)", len, 4683 "MTU %d)", len,
4666 chan->imtu); 4684 chan->imtu);
4667 bh_unlock_sock(sk); 4685 release_sock(sk);
4668 l2cap_conn_unreliable(conn, ECOMM); 4686 l2cap_conn_unreliable(conn, ECOMM);
4669 goto drop; 4687 goto drop;
4670 } 4688 }
4671 bh_unlock_sock(sk); 4689 release_sock(sk);
4672 } 4690 }
4673 4691
4674 /* Allocate skb for the complete frame (with header) */ 4692 /* Allocate skb for the complete frame (with header) */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f73704321a77..9ca5616166f7 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -3,6 +3,7 @@
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc. 5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
6 7
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 9
@@ -122,70 +123,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
122 if (la.l2_cid && la.l2_psm) 123 if (la.l2_cid && la.l2_psm)
123 return -EINVAL; 124 return -EINVAL;
124 125
125 lock_sock(sk); 126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
126
127 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
128 && !(la.l2_psm || la.l2_cid)) {
129 err = -EINVAL;
130 goto done;
131 }
132
133 switch (chan->mode) {
134 case L2CAP_MODE_BASIC:
135 break;
136 case L2CAP_MODE_ERTM:
137 case L2CAP_MODE_STREAMING:
138 if (!disable_ertm)
139 break;
140 /* fall through */
141 default:
142 err = -ENOTSUPP;
143 goto done;
144 }
145
146 switch (sk->sk_state) {
147 case BT_CONNECT:
148 case BT_CONNECT2:
149 case BT_CONFIG:
150 /* Already connecting */
151 goto wait;
152
153 case BT_CONNECTED:
154 /* Already connected */
155 err = -EISCONN;
156 goto done;
157
158 case BT_OPEN:
159 case BT_BOUND:
160 /* Can connect */
161 break;
162
163 default:
164 err = -EBADFD;
165 goto done;
166 }
167
168 /* PSM must be odd and lsb of upper byte must be 0 */
169 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
170 chan->chan_type != L2CAP_CHAN_RAW) {
171 err = -EINVAL;
172 goto done;
173 }
174
175 /* Set destination address and psm */
176 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
177 chan->psm = la.l2_psm;
178 chan->dcid = la.l2_cid;
179
180 err = l2cap_chan_connect(l2cap_pi(sk)->chan);
181 if (err) 127 if (err)
182 goto done; 128 goto done;
183 129
184wait:
185 err = bt_sock_wait_state(sk, BT_CONNECTED, 130 err = bt_sock_wait_state(sk, BT_CONNECTED,
186 sock_sndtimeo(sk, flags & O_NONBLOCK)); 131 sock_sndtimeo(sk, flags & O_NONBLOCK));
187done: 132done:
188 release_sock(sk); 133 if (sock_owned_by_user(sk))
134 release_sock(sk);
189 return err; 135 return err;
190} 136}
191 137
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 1ce549bae241..fbcbef6ecceb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -36,6 +36,8 @@
36 36
37#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ 37#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
38 38
39#define SERVICE_CACHE_TIMEOUT (5 * 1000)
40
39struct pending_cmd { 41struct pending_cmd {
40 struct list_head list; 42 struct list_head list;
41 u16 opcode; 43 u16 opcode;
@@ -243,6 +245,262 @@ static int read_index_list(struct sock *sk)
243 return err; 245 return err;
244} 246}
245 247
248static u32 get_supported_settings(struct hci_dev *hdev)
249{
250 u32 settings = 0;
251
252 settings |= MGMT_SETTING_POWERED;
253 settings |= MGMT_SETTING_CONNECTABLE;
254 settings |= MGMT_SETTING_FAST_CONNECTABLE;
255 settings |= MGMT_SETTING_DISCOVERABLE;
256 settings |= MGMT_SETTING_PAIRABLE;
257
258 if (hdev->features[6] & LMP_SIMPLE_PAIR)
259 settings |= MGMT_SETTING_SSP;
260
261 if (!(hdev->features[4] & LMP_NO_BREDR)) {
262 settings |= MGMT_SETTING_BREDR;
263 settings |= MGMT_SETTING_LINK_SECURITY;
264 }
265
266 if (hdev->features[4] & LMP_LE)
267 settings |= MGMT_SETTING_LE;
268
269 return settings;
270}
271
272static u32 get_current_settings(struct hci_dev *hdev)
273{
274 u32 settings = 0;
275
276 if (test_bit(HCI_UP, &hdev->flags))
277 settings |= MGMT_SETTING_POWERED;
278 else
279 return settings;
280
281 if (test_bit(HCI_PSCAN, &hdev->flags))
282 settings |= MGMT_SETTING_CONNECTABLE;
283
284 if (test_bit(HCI_ISCAN, &hdev->flags))
285 settings |= MGMT_SETTING_DISCOVERABLE;
286
287 if (test_bit(HCI_PAIRABLE, &hdev->flags))
288 settings |= MGMT_SETTING_PAIRABLE;
289
290 if (!(hdev->features[4] & LMP_NO_BREDR))
291 settings |= MGMT_SETTING_BREDR;
292
293 if (hdev->extfeatures[0] & LMP_HOST_LE)
294 settings |= MGMT_SETTING_LE;
295
296 if (test_bit(HCI_AUTH, &hdev->flags))
297 settings |= MGMT_SETTING_LINK_SECURITY;
298
299 if (hdev->ssp_mode > 0)
300 settings |= MGMT_SETTING_SSP;
301
302 return settings;
303}
304
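The two helpers above replace the old per-mode booleans with a pair of bitmasks: supported_settings says what the controller can do, current_settings what it is doing now, and userspace intersects them. For instance (flag values as defined in include/net/bluetooth/mgmt.h at the time, repeated here for illustration):

#include <linux/types.h>

#define MGMT_SETTING_POWERED		0x00000001
#define MGMT_SETTING_CONNECTABLE	0x00000002
#define MGMT_SETTING_SSP		0x00000040

static bool ssp_supported_but_off(u32 supported, u32 current_settings)
{
	return (supported & MGMT_SETTING_SSP) &&
	       !(current_settings & MGMT_SETTING_SSP);
}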
305#define EIR_FLAGS 0x01 /* flags */
306#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
307#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
308#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
309#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
310#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
311#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
312#define EIR_NAME_SHORT 0x08 /* shortened local name */
313#define EIR_NAME_COMPLETE 0x09 /* complete local name */
314#define EIR_TX_POWER 0x0A /* transmit power level */
315#define EIR_DEVICE_ID 0x10 /* device ID */
316
317#define PNP_INFO_SVCLASS_ID 0x1200
318
319static u8 bluetooth_base_uuid[] = {
320 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
321 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
322};
323
324static u16 get_uuid16(u8 *uuid128)
325{
326 u32 val;
327 int i;
328
329 for (i = 0; i < 12; i++) {
330 if (bluetooth_base_uuid[i] != uuid128[i])
331 return 0;
332 }
333
334 memcpy(&val, &uuid128[12], 4);
335
336 val = le32_to_cpu(val);
337 if (val > 0xffff)
338 return 0;
339
340 return (u16) val;
341}
342
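get_uuid16() inverts the standard short-UUID expansion: a 16-bit UUID xxxx stands for 0000xxxx-0000-1000-8000-00805F9B34FB, so a 128-bit value only collapses back to 16 bits if its first 96 bits (bytes 0-11 in the little-endian layout used here) match the Bluetooth base UUID. For example, 0x110B (Audio Sink) in that byte order:

#include <linux/types.h>

static const u8 audio_sink_uuid128[16] = {
	0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,	/* base UUID */
	0x00, 0x10, 0x00, 0x00,				/* base UUID */
	0x0B, 0x11, 0x00, 0x00,		/* 0x0000110B, little-endian */
};

Bytes 0-11 match bluetooth_base_uuid, bytes 12-15 decode to 0x0000110B, which fits in 16 bits, so get_uuid16() returns 0x110B for this array.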
343static void create_eir(struct hci_dev *hdev, u8 *data)
344{
345 u8 *ptr = data;
346 u16 eir_len = 0;
347 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
348 int i, truncated = 0;
349 struct bt_uuid *uuid;
350 size_t name_len;
351
352 name_len = strlen(hdev->dev_name);
353
354 if (name_len > 0) {
355 /* EIR Data type */
356 if (name_len > 48) {
357 name_len = 48;
358 ptr[1] = EIR_NAME_SHORT;
359 } else
360 ptr[1] = EIR_NAME_COMPLETE;
361
362 /* EIR Data length */
363 ptr[0] = name_len + 1;
364
365 memcpy(ptr + 2, hdev->dev_name, name_len);
366
367 eir_len += (name_len + 2);
368 ptr += (name_len + 2);
369 }
370
371 memset(uuid16_list, 0, sizeof(uuid16_list));
372
373 /* Group all UUID16 types */
374 list_for_each_entry(uuid, &hdev->uuids, list) {
375 u16 uuid16;
376
377 uuid16 = get_uuid16(uuid->uuid);
378 if (uuid16 == 0)
379 return;
380
381 if (uuid16 < 0x1100)
382 continue;
383
384 if (uuid16 == PNP_INFO_SVCLASS_ID)
385 continue;
386
387 /* Stop if not enough space to put next UUID */
388 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
389 truncated = 1;
390 break;
391 }
392
393 /* Check for duplicates */
394 for (i = 0; uuid16_list[i] != 0; i++)
395 if (uuid16_list[i] == uuid16)
396 break;
397
398 if (uuid16_list[i] == 0) {
399 uuid16_list[i] = uuid16;
400 eir_len += sizeof(u16);
401 }
402 }
403
404 if (uuid16_list[0] != 0) {
405 u8 *length = ptr;
406
407 /* EIR Data type */
408 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
409
410 ptr += 2;
411 eir_len += 2;
412
413 for (i = 0; uuid16_list[i] != 0; i++) {
414 *ptr++ = (uuid16_list[i] & 0x00ff);
415 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
416 }
417
418 /* EIR Data length */
419 *length = (i * sizeof(u16)) + 1;
420 }
421}
422
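create_eir() emits standard EIR structures: one length octet covering the type octet plus payload, then the type, then the data. That is why ptr[0] is set to name_len + 1 for the name field and to (i * sizeof(u16)) + 1 for the UUID list. The append step in isolation (illustrative helper):

#include <linux/string.h>
#include <linux/types.h>

/* Layout per field: [len = 1 + data_len][type][data...];
 * returns the number of buffer bytes consumed. */
static u16 eir_append(u8 *ptr, u8 type, const u8 *data, u8 data_len)
{
	ptr[0] = data_len + 1;
	ptr[1] = type;
	memcpy(ptr + 2, data, data_len);

	return data_len + 2;
}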
423static int update_eir(struct hci_dev *hdev)
424{
425 struct hci_cp_write_eir cp;
426
427 if (!(hdev->features[6] & LMP_EXT_INQ))
428 return 0;
429
430 if (hdev->ssp_mode == 0)
431 return 0;
432
433 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
434 return 0;
435
436 memset(&cp, 0, sizeof(cp));
437
438 create_eir(hdev, cp.data);
439
440 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
441 return 0;
442
443 memcpy(hdev->eir, cp.data, sizeof(cp.data));
444
445 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
446}
447
448static u8 get_service_classes(struct hci_dev *hdev)
449{
450 struct bt_uuid *uuid;
451 u8 val = 0;
452
453 list_for_each_entry(uuid, &hdev->uuids, list)
454 val |= uuid->svc_hint;
455
456 return val;
457}
458
459static int update_class(struct hci_dev *hdev)
460{
461 u8 cod[3];
462
463 BT_DBG("%s", hdev->name);
464
465 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
466 return 0;
467
468 cod[0] = hdev->minor_class;
469 cod[1] = hdev->major_class;
470 cod[2] = get_service_classes(hdev);
471
472 if (memcmp(cod, hdev->dev_class, 3) == 0)
473 return 0;
474
475 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
476}
477
478static void service_cache_off(struct work_struct *work)
479{
480 struct hci_dev *hdev = container_of(work, struct hci_dev,
481 service_cache.work);
482
483 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
484 return;
485
486 hci_dev_lock(hdev);
487
488 update_eir(hdev);
489 update_class(hdev);
490
491 hci_dev_unlock(hdev);
492}
493
494static void mgmt_init_hdev(struct hci_dev *hdev)
495{
496 if (!test_and_set_bit(HCI_MGMT, &hdev->flags))
497 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
498
499 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags))
500 schedule_delayed_work(&hdev->service_cache,
501 msecs_to_jiffies(SERVICE_CACHE_TIMEOUT));
502}
503
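mgmt_init_hdev() leans on the atomicity of test_and_set_bit(): the first caller to flip HCI_MGMT performs the one-time setup, and any concurrent or later caller sees the bit already set and skips it, with no extra lock. The idiom in isolation (illustrative):

#include <linux/bitops.h>

#define MY_INITIALIZED	0	/* bit number, illustrative */

static unsigned long init_flags;

static void maybe_init(void)
{
	/* atomically sets the bit and returns its previous value,
	 * so at most one caller ever reaches the init body */
	if (test_and_set_bit(MY_INITIALIZED, &init_flags))
		return;

	/* one-time initialization goes here */
}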
246static int read_controller_info(struct sock *sk, u16 index) 504static int read_controller_info(struct sock *sk, u16 index)
247{ 505{
248 struct mgmt_rp_read_info rp; 506 struct mgmt_rp_read_info rp;
@@ -258,36 +516,27 @@ static int read_controller_info(struct sock *sk, u16 index)
258 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 516 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
259 cancel_delayed_work_sync(&hdev->power_off); 517 cancel_delayed_work_sync(&hdev->power_off);
260 518
261 hci_dev_lock_bh(hdev); 519 hci_dev_lock(hdev);
262 520
263 set_bit(HCI_MGMT, &hdev->flags); 521 if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
522 mgmt_init_hdev(hdev);
264 523
265 memset(&rp, 0, sizeof(rp)); 524 memset(&rp, 0, sizeof(rp));
266 525
267 rp.type = hdev->dev_type; 526 bacpy(&rp.bdaddr, &hdev->bdaddr);
268 527
269 rp.powered = test_bit(HCI_UP, &hdev->flags); 528 rp.version = hdev->hci_ver;
270 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
271 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
272 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
273 529
274 if (test_bit(HCI_AUTH, &hdev->flags)) 530 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
275 rp.sec_mode = 3; 531
276 else if (hdev->ssp_mode > 0) 532 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
277 rp.sec_mode = 4; 533 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
278 else
279 rp.sec_mode = 2;
280 534
281 bacpy(&rp.bdaddr, &hdev->bdaddr);
282 memcpy(rp.features, hdev->features, 8);
283 memcpy(rp.dev_class, hdev->dev_class, 3); 535 memcpy(rp.dev_class, hdev->dev_class, 3);
284 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
285 rp.hci_ver = hdev->hci_ver;
286 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
287 536
288 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 537 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
289 538
290 hci_dev_unlock_bh(hdev); 539 hci_dev_unlock(hdev);
291 hci_dev_put(hdev); 540 hci_dev_put(hdev);
292 541
293 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 542 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -366,13 +615,11 @@ static void mgmt_pending_remove(struct pending_cmd *cmd)
366 mgmt_pending_free(cmd); 615 mgmt_pending_free(cmd);
367} 616}
368 617
369static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val) 618static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
370{ 619{
371 struct mgmt_mode rp; 620 __le32 settings = cpu_to_le32(get_current_settings(hdev));
372 621
373 rp.val = val; 622 return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings));
374
375 return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
376} 623}
377 624
378static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) 625static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -395,11 +642,11 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
395 return cmd_status(sk, index, MGMT_OP_SET_POWERED, 642 return cmd_status(sk, index, MGMT_OP_SET_POWERED,
396 MGMT_STATUS_INVALID_PARAMS); 643 MGMT_STATUS_INVALID_PARAMS);
397 644
398 hci_dev_lock_bh(hdev); 645 hci_dev_lock(hdev);
399 646
400 up = test_bit(HCI_UP, &hdev->flags); 647 up = test_bit(HCI_UP, &hdev->flags);
401 if ((cp->val && up) || (!cp->val && !up)) { 648 if ((cp->val && up) || (!cp->val && !up)) {
402 err = send_mode_rsp(sk, index, MGMT_OP_SET_POWERED, cp->val); 649 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
403 goto failed; 650 goto failed;
404 } 651 }
405 652
@@ -416,14 +663,14 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
416 } 663 }
417 664
418 if (cp->val) 665 if (cp->val)
419 queue_work(hdev->workqueue, &hdev->power_on); 666 schedule_work(&hdev->power_on);
420 else 667 else
421 queue_work(hdev->workqueue, &hdev->power_off.work); 668 schedule_work(&hdev->power_off.work);
422 669
423 err = 0; 670 err = 0;
424 671
425failed: 672failed:
426 hci_dev_unlock_bh(hdev); 673 hci_dev_unlock(hdev);
427 hci_dev_put(hdev); 674 hci_dev_put(hdev);
428 return err; 675 return err;
429} 676}
@@ -450,7 +697,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
450 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 697 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
451 MGMT_STATUS_INVALID_PARAMS); 698 MGMT_STATUS_INVALID_PARAMS);
452 699
453 hci_dev_lock_bh(hdev); 700 hci_dev_lock(hdev);
454 701
455 if (!test_bit(HCI_UP, &hdev->flags)) { 702 if (!test_bit(HCI_UP, &hdev->flags)) {
456 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 703 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
@@ -467,8 +714,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
467 714
468 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && 715 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
469 test_bit(HCI_PSCAN, &hdev->flags)) { 716 test_bit(HCI_PSCAN, &hdev->flags)) {
470 err = send_mode_rsp(sk, index, MGMT_OP_SET_DISCOVERABLE, 717 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
471 cp->val);
472 goto failed; 718 goto failed;
473 } 719 }
474 720
@@ -493,7 +739,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
493 hdev->discov_timeout = get_unaligned_le16(&cp->timeout); 739 hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
494 740
495failed: 741failed:
496 hci_dev_unlock_bh(hdev); 742 hci_dev_unlock(hdev);
497 hci_dev_put(hdev); 743 hci_dev_put(hdev);
498 744
499 return err; 745 return err;
@@ -521,7 +767,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
521 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 767 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
522 MGMT_STATUS_INVALID_PARAMS); 768 MGMT_STATUS_INVALID_PARAMS);
523 769
524 hci_dev_lock_bh(hdev); 770 hci_dev_lock(hdev);
525 771
526 if (!test_bit(HCI_UP, &hdev->flags)) { 772 if (!test_bit(HCI_UP, &hdev->flags)) {
527 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 773 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
@@ -537,8 +783,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
537 } 783 }
538 784
539 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { 785 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
540 err = send_mode_rsp(sk, index, MGMT_OP_SET_CONNECTABLE, 786 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
541 cp->val);
542 goto failed; 787 goto failed;
543 } 788 }
544 789
@@ -558,7 +803,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
558 mgmt_pending_remove(cmd); 803 mgmt_pending_remove(cmd);
559 804
560failed: 805failed:
561 hci_dev_unlock_bh(hdev); 806 hci_dev_unlock(hdev);
562 hci_dev_put(hdev); 807 hci_dev_put(hdev);
563 808
564 return err; 809 return err;
@@ -596,8 +841,9 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
596static int set_pairable(struct sock *sk, u16 index, unsigned char *data, 841static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
597 u16 len) 842 u16 len)
598{ 843{
599 struct mgmt_mode *cp, ev; 844 struct mgmt_mode *cp;
600 struct hci_dev *hdev; 845 struct hci_dev *hdev;
846 __le32 ev;
601 int err; 847 int err;
602 848
603 cp = (void *) data; 849 cp = (void *) data;
@@ -613,201 +859,28 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
613 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, 859 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
614 MGMT_STATUS_INVALID_PARAMS); 860 MGMT_STATUS_INVALID_PARAMS);
615 861
616 hci_dev_lock_bh(hdev); 862 hci_dev_lock(hdev);
617 863
618 if (cp->val) 864 if (cp->val)
619 set_bit(HCI_PAIRABLE, &hdev->flags); 865 set_bit(HCI_PAIRABLE, &hdev->flags);
620 else 866 else
621 clear_bit(HCI_PAIRABLE, &hdev->flags); 867 clear_bit(HCI_PAIRABLE, &hdev->flags);
622 868
623 err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val); 869 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
624 if (err < 0) 870 if (err < 0)
625 goto failed; 871 goto failed;
626 872
627 ev.val = cp->val; 873 ev = cpu_to_le32(get_current_settings(hdev));
628 874
629 err = mgmt_event(MGMT_EV_PAIRABLE, hdev, &ev, sizeof(ev), sk); 875 err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk);
630 876
631failed: 877failed:
632 hci_dev_unlock_bh(hdev); 878 hci_dev_unlock(hdev);
633 hci_dev_put(hdev); 879 hci_dev_put(hdev);
634 880
635 return err; 881 return err;
636} 882}
637 883
638#define EIR_FLAGS 0x01 /* flags */
639#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
640#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
641#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
642#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
643#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
644#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
645#define EIR_NAME_SHORT 0x08 /* shortened local name */
646#define EIR_NAME_COMPLETE 0x09 /* complete local name */
647#define EIR_TX_POWER 0x0A /* transmit power level */
648#define EIR_DEVICE_ID 0x10 /* device ID */
649
650#define PNP_INFO_SVCLASS_ID 0x1200
651
652static u8 bluetooth_base_uuid[] = {
653 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
654 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
655};
656
657static u16 get_uuid16(u8 *uuid128)
658{
659 u32 val;
660 int i;
661
662 for (i = 0; i < 12; i++) {
663 if (bluetooth_base_uuid[i] != uuid128[i])
664 return 0;
665 }
666
667 memcpy(&val, &uuid128[12], 4);
668
669 val = le32_to_cpu(val);
670 if (val > 0xffff)
671 return 0;
672
673 return (u16) val;
674}
675
676static void create_eir(struct hci_dev *hdev, u8 *data)
677{
678 u8 *ptr = data;
679 u16 eir_len = 0;
680 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
681 int i, truncated = 0;
682 struct bt_uuid *uuid;
683 size_t name_len;
684
685 name_len = strlen(hdev->dev_name);
686
687 if (name_len > 0) {
688 /* EIR Data type */
689 if (name_len > 48) {
690 name_len = 48;
691 ptr[1] = EIR_NAME_SHORT;
692 } else
693 ptr[1] = EIR_NAME_COMPLETE;
694
695 /* EIR Data length */
696 ptr[0] = name_len + 1;
697
698 memcpy(ptr + 2, hdev->dev_name, name_len);
699
700 eir_len += (name_len + 2);
701 ptr += (name_len + 2);
702 }
703
704 memset(uuid16_list, 0, sizeof(uuid16_list));
705
706 /* Group all UUID16 types */
707 list_for_each_entry(uuid, &hdev->uuids, list) {
708 u16 uuid16;
709
710 uuid16 = get_uuid16(uuid->uuid);
711 if (uuid16 == 0)
712 return;
713
714 if (uuid16 < 0x1100)
715 continue;
716
717 if (uuid16 == PNP_INFO_SVCLASS_ID)
718 continue;
719
720 /* Stop if not enough space to put next UUID */
721 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
722 truncated = 1;
723 break;
724 }
725
726 /* Check for duplicates */
727 for (i = 0; uuid16_list[i] != 0; i++)
728 if (uuid16_list[i] == uuid16)
729 break;
730
731 if (uuid16_list[i] == 0) {
732 uuid16_list[i] = uuid16;
733 eir_len += sizeof(u16);
734 }
735 }
736
737 if (uuid16_list[0] != 0) {
738 u8 *length = ptr;
739
740 /* EIR Data type */
741 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
742
743 ptr += 2;
744 eir_len += 2;
745
746 for (i = 0; uuid16_list[i] != 0; i++) {
747 *ptr++ = (uuid16_list[i] & 0x00ff);
748 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
749 }
750
751 /* EIR Data length */
752 *length = (i * sizeof(u16)) + 1;
753 }
754}
755
756static int update_eir(struct hci_dev *hdev)
757{
758 struct hci_cp_write_eir cp;
759
760 if (!(hdev->features[6] & LMP_EXT_INQ))
761 return 0;
762
763 if (hdev->ssp_mode == 0)
764 return 0;
765
766 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
767 return 0;
768
769 memset(&cp, 0, sizeof(cp));
770
771 create_eir(hdev, cp.data);
772
773 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
774 return 0;
775
776 memcpy(hdev->eir, cp.data, sizeof(cp.data));
777
778 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
779}
780
781static u8 get_service_classes(struct hci_dev *hdev)
782{
783 struct bt_uuid *uuid;
784 u8 val = 0;
785
786 list_for_each_entry(uuid, &hdev->uuids, list)
787 val |= uuid->svc_hint;
788
789 return val;
790}
791
792static int update_class(struct hci_dev *hdev)
793{
794 u8 cod[3];
795
796 BT_DBG("%s", hdev->name);
797
798 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
799 return 0;
800
801 cod[0] = hdev->minor_class;
802 cod[1] = hdev->major_class;
803 cod[2] = get_service_classes(hdev);
804
805 if (memcmp(cod, hdev->dev_class, 3) == 0)
806 return 0;
807
808 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
809}
810
811static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 884static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
812{ 885{
813 struct mgmt_cp_add_uuid *cp; 886 struct mgmt_cp_add_uuid *cp;
@@ -828,7 +901,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
828 return cmd_status(sk, index, MGMT_OP_ADD_UUID, 901 return cmd_status(sk, index, MGMT_OP_ADD_UUID,
829 MGMT_STATUS_INVALID_PARAMS); 902 MGMT_STATUS_INVALID_PARAMS);
830 903
831 hci_dev_lock_bh(hdev); 904 hci_dev_lock(hdev);
832 905
833 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 906 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
834 if (!uuid) { 907 if (!uuid) {
@@ -852,7 +925,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
852 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 925 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
853 926
854failed: 927failed:
855 hci_dev_unlock_bh(hdev); 928 hci_dev_unlock(hdev);
856 hci_dev_put(hdev); 929 hci_dev_put(hdev);
857 930
858 return err; 931 return err;
@@ -879,7 +952,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
879 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, 952 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
880 MGMT_STATUS_INVALID_PARAMS); 953 MGMT_STATUS_INVALID_PARAMS);
881 954
882 hci_dev_lock_bh(hdev); 955 hci_dev_lock(hdev);
883 956
884 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 957 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
885 err = hci_uuids_clear(hdev); 958 err = hci_uuids_clear(hdev);
@@ -915,7 +988,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
915 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 988 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
916 989
917unlock: 990unlock:
918 hci_dev_unlock_bh(hdev); 991 hci_dev_unlock(hdev);
919 hci_dev_put(hdev); 992 hci_dev_put(hdev);
920 993
921 return err; 994 return err;
@@ -941,62 +1014,24 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
941 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, 1014 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
942 MGMT_STATUS_INVALID_PARAMS); 1015 MGMT_STATUS_INVALID_PARAMS);
943 1016
944 hci_dev_lock_bh(hdev); 1017 hci_dev_lock(hdev);
945 1018
946 hdev->major_class = cp->major; 1019 hdev->major_class = cp->major;
947 hdev->minor_class = cp->minor; 1020 hdev->minor_class = cp->minor;
948 1021
1022 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) {
1023 hci_dev_unlock(hdev);
1024 cancel_delayed_work_sync(&hdev->service_cache);
1025 hci_dev_lock(hdev);
1026 update_eir(hdev);
1027 }
1028
949 err = update_class(hdev); 1029 err = update_class(hdev);
950 1030
951 if (err == 0) 1031 if (err == 0)
952 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 1032 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
953 1033
954 hci_dev_unlock_bh(hdev); 1034 hci_dev_unlock(hdev);
955 hci_dev_put(hdev);
956
957 return err;
958}
959
960static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
961 u16 len)
962{
963 struct hci_dev *hdev;
964 struct mgmt_cp_set_service_cache *cp;
965 int err;
966
967 cp = (void *) data;
968
969 if (len != sizeof(*cp))
970 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
971 MGMT_STATUS_INVALID_PARAMS);
972
973 hdev = hci_dev_get(index);
974 if (!hdev)
975 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
976 MGMT_STATUS_INVALID_PARAMS);
977
978 hci_dev_lock_bh(hdev);
979
980 BT_DBG("hci%u enable %d", index, cp->enable);
981
982 if (cp->enable) {
983 set_bit(HCI_SERVICE_CACHE, &hdev->flags);
984 err = 0;
985 } else {
986 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
987 err = update_class(hdev);
988 if (err == 0)
989 err = update_eir(hdev);
990 }
991
992 if (err == 0)
993 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
994 0);
995 else
996 cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, -err);
997
998
999 hci_dev_unlock_bh(hdev);
1000 hci_dev_put(hdev); 1035 hci_dev_put(hdev);
1001 1036
1002 return err; 1037 return err;
@@ -1035,7 +1070,7 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
1035 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 1070 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
1036 key_count); 1071 key_count);
1037 1072
1038 hci_dev_lock_bh(hdev); 1073 hci_dev_lock(hdev);
1039 1074
1040 hci_link_keys_clear(hdev); 1075 hci_link_keys_clear(hdev);
1041 1076
@@ -1055,7 +1090,7 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
1055 1090
1056 cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0); 1091 cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
1057 1092
1058 hci_dev_unlock_bh(hdev); 1093 hci_dev_unlock(hdev);
1059 hci_dev_put(hdev); 1094 hci_dev_put(hdev);
1060 1095
1061 return 0; 1096 return 0;
@@ -1083,7 +1118,7 @@ static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
1083 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, 1118 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
1084 MGMT_STATUS_INVALID_PARAMS); 1119 MGMT_STATUS_INVALID_PARAMS);
1085 1120
1086 hci_dev_lock_bh(hdev); 1121 hci_dev_lock(hdev);
1087 1122
1088 memset(&rp, 0, sizeof(rp)); 1123 memset(&rp, 0, sizeof(rp));
1089 bacpy(&rp.bdaddr, &cp->bdaddr); 1124 bacpy(&rp.bdaddr, &cp->bdaddr);
@@ -1124,7 +1159,7 @@ unlock:
1124 if (err < 0) 1159 if (err < 0)
1125 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, 1160 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
1126 sizeof(rp)); 1161 sizeof(rp));
1127 hci_dev_unlock_bh(hdev); 1162 hci_dev_unlock(hdev);
1128 hci_dev_put(hdev); 1163 hci_dev_put(hdev);
1129 1164
1130 return err; 1165 return err;
@@ -1152,7 +1187,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1152 return cmd_status(sk, index, MGMT_OP_DISCONNECT, 1187 return cmd_status(sk, index, MGMT_OP_DISCONNECT,
1153 MGMT_STATUS_INVALID_PARAMS); 1188 MGMT_STATUS_INVALID_PARAMS);
1154 1189
1155 hci_dev_lock_bh(hdev); 1190 hci_dev_lock(hdev);
1156 1191
1157 if (!test_bit(HCI_UP, &hdev->flags)) { 1192 if (!test_bit(HCI_UP, &hdev->flags)) {
1158 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1193 err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
@@ -1190,7 +1225,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1190 mgmt_pending_remove(cmd); 1225 mgmt_pending_remove(cmd);
1191 1226
1192failed: 1227failed:
1193 hci_dev_unlock_bh(hdev); 1228 hci_dev_unlock(hdev);
1194 hci_dev_put(hdev); 1229 hci_dev_put(hdev);
1195 1230
1196 return err; 1231 return err;
@@ -1232,7 +1267,7 @@ static int get_connections(struct sock *sk, u16 index)
1232 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, 1267 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
1233 MGMT_STATUS_INVALID_PARAMS); 1268 MGMT_STATUS_INVALID_PARAMS);
1234 1269
1235 hci_dev_lock_bh(hdev); 1270 hci_dev_lock(hdev);
1236 1271
1237 count = 0; 1272 count = 0;
1238 list_for_each(p, &hdev->conn_hash.list) { 1273 list_for_each(p, &hdev->conn_hash.list) {
@@ -1264,7 +1299,7 @@ static int get_connections(struct sock *sk, u16 index)
1264 1299
1265unlock: 1300unlock:
1266 kfree(rp); 1301 kfree(rp);
1267 hci_dev_unlock_bh(hdev); 1302 hci_dev_unlock(hdev);
1268 hci_dev_put(hdev); 1303 hci_dev_put(hdev);
1269 return err; 1304 return err;
1270} 1305}
@@ -1312,7 +1347,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1312 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1347 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1313 MGMT_STATUS_INVALID_PARAMS); 1348 MGMT_STATUS_INVALID_PARAMS);
1314 1349
1315 hci_dev_lock_bh(hdev); 1350 hci_dev_lock(hdev);
1316 1351
1317 if (!test_bit(HCI_UP, &hdev->flags)) { 1352 if (!test_bit(HCI_UP, &hdev->flags)) {
1318 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1353 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
@@ -1355,7 +1390,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1355 mgmt_pending_remove(cmd); 1390 mgmt_pending_remove(cmd);
1356 1391
1357failed: 1392failed:
1358 hci_dev_unlock_bh(hdev); 1393 hci_dev_unlock(hdev);
1359 hci_dev_put(hdev); 1394 hci_dev_put(hdev);
1360 1395
1361 return err; 1396 return err;
@@ -1381,7 +1416,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1381 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1416 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1382 MGMT_STATUS_INVALID_PARAMS); 1417 MGMT_STATUS_INVALID_PARAMS);
1383 1418
1384 hci_dev_lock_bh(hdev); 1419 hci_dev_lock(hdev);
1385 1420
1386 if (!test_bit(HCI_UP, &hdev->flags)) { 1421 if (!test_bit(HCI_UP, &hdev->flags)) {
1387 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1422 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1392,7 +1427,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1392 err = send_pin_code_neg_reply(sk, index, hdev, cp); 1427 err = send_pin_code_neg_reply(sk, index, hdev, cp);
1393 1428
1394failed: 1429failed:
1395 hci_dev_unlock_bh(hdev); 1430 hci_dev_unlock(hdev);
1396 hci_dev_put(hdev); 1431 hci_dev_put(hdev);
1397 1432
1398 return err; 1433 return err;
@@ -1417,14 +1452,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1417 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, 1452 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
1418 MGMT_STATUS_INVALID_PARAMS); 1453 MGMT_STATUS_INVALID_PARAMS);
1419 1454
1420 hci_dev_lock_bh(hdev); 1455 hci_dev_lock(hdev);
1421 1456
1422 hdev->io_capability = cp->io_capability; 1457 hdev->io_capability = cp->io_capability;
1423 1458
1424 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1459 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1425 hdev->io_capability); 1460 hdev->io_capability);
1426 1461
1427 hci_dev_unlock_bh(hdev); 1462 hci_dev_unlock(hdev);
1428 hci_dev_put(hdev); 1463 hci_dev_put(hdev);
1429 1464
1430 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1465 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1505,7 +1540,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1505 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, 1540 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
1506 MGMT_STATUS_INVALID_PARAMS); 1541 MGMT_STATUS_INVALID_PARAMS);
1507 1542
1508 hci_dev_lock_bh(hdev); 1543 hci_dev_lock(hdev);
1509 1544
1510 sec_level = BT_SECURITY_MEDIUM; 1545 sec_level = BT_SECURITY_MEDIUM;
1511 if (cp->io_cap == 0x03) 1546 if (cp->io_cap == 0x03)
@@ -1562,7 +1597,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1562 err = 0; 1597 err = 0;
1563 1598
1564unlock: 1599unlock:
1565 hci_dev_unlock_bh(hdev); 1600 hci_dev_unlock(hdev);
1566 hci_dev_put(hdev); 1601 hci_dev_put(hdev);
1567 1602
1568 return err; 1603 return err;
@@ -1581,7 +1616,7 @@ static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
1581 return cmd_status(sk, index, mgmt_op, 1616 return cmd_status(sk, index, mgmt_op,
1582 MGMT_STATUS_INVALID_PARAMS); 1617 MGMT_STATUS_INVALID_PARAMS);
1583 1618
1584 hci_dev_lock_bh(hdev); 1619 hci_dev_lock(hdev);
1585 1620
1586 if (!test_bit(HCI_UP, &hdev->flags)) { 1621 if (!test_bit(HCI_UP, &hdev->flags)) {
1587 err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED); 1622 err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
@@ -1632,7 +1667,7 @@ static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
1632 mgmt_pending_remove(cmd); 1667 mgmt_pending_remove(cmd);
1633 1668
1634done: 1669done:
1635 hci_dev_unlock_bh(hdev); 1670 hci_dev_unlock(hdev);
1636 hci_dev_put(hdev); 1671 hci_dev_put(hdev);
1637 1672
1638 return err; 1673 return err;
@@ -1656,7 +1691,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
1656static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data, 1691static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
1657 u16 len) 1692 u16 len)
1658{ 1693{
1659 struct mgmt_cp_user_confirm_reply *cp = (void *) data; 1694 struct mgmt_cp_user_confirm_neg_reply *cp = data;
1660 1695
1661 BT_DBG(""); 1696 BT_DBG("");
1662 1697
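
In user_confirm_neg_reply() above, the payload is now interpreted through the matching mgmt_cp_user_confirm_neg_reply structure instead of the positive-reply one (harmless only while the two layouts happen to coincide), and the redundant (void *) cast goes away, since C converts void * to any object pointer type implicitly. A tiny illustration with made-up structures, not the real mgmt ABI:

#include <stdint.h>

struct demo_cp_confirm_reply     { uint8_t bdaddr[6]; };
struct demo_cp_confirm_neg_reply { uint8_t bdaddr[6]; };

static const uint8_t *neg_reply_addr(void *data)
{
	/* void * needs no explicit cast to become a typed pointer ... */
	struct demo_cp_confirm_neg_reply *cp = data;

	/* ... and naming the right struct keeps field offsets honest
	 * should the two command layouts ever diverge. */
	return cp->bdaddr;
}
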
@@ -1720,7 +1755,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1720 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, 1755 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
1721 MGMT_STATUS_INVALID_PARAMS); 1756 MGMT_STATUS_INVALID_PARAMS);
1722 1757
1723 hci_dev_lock_bh(hdev); 1758 hci_dev_lock(hdev);
1724 1759
1725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); 1760 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
1726 if (!cmd) { 1761 if (!cmd) {
@@ -1735,7 +1770,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1735 mgmt_pending_remove(cmd); 1770 mgmt_pending_remove(cmd);
1736 1771
1737failed: 1772failed:
1738 hci_dev_unlock_bh(hdev); 1773 hci_dev_unlock(hdev);
1739 hci_dev_put(hdev); 1774 hci_dev_put(hdev);
1740 1775
1741 return err; 1776 return err;
@@ -1754,7 +1789,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1754 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1789 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1755 MGMT_STATUS_INVALID_PARAMS); 1790 MGMT_STATUS_INVALID_PARAMS);
1756 1791
1757 hci_dev_lock_bh(hdev); 1792 hci_dev_lock(hdev);
1758 1793
1759 if (!test_bit(HCI_UP, &hdev->flags)) { 1794 if (!test_bit(HCI_UP, &hdev->flags)) {
1760 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1795 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1785,7 +1820,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1785 mgmt_pending_remove(cmd); 1820 mgmt_pending_remove(cmd);
1786 1821
1787unlock: 1822unlock:
1788 hci_dev_unlock_bh(hdev); 1823 hci_dev_unlock(hdev);
1789 hci_dev_put(hdev); 1824 hci_dev_put(hdev);
1790 1825
1791 return err; 1826 return err;
@@ -1809,7 +1844,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1809 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 1844 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1810 MGMT_STATUS_INVALID_PARAMS); 1845 MGMT_STATUS_INVALID_PARAMS);
1811 1846
1812 hci_dev_lock_bh(hdev); 1847 hci_dev_lock(hdev);
1813 1848
1814 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 1849 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1815 cp->randomizer); 1850 cp->randomizer);
@@ -1820,7 +1855,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1820 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 1855 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1821 0); 1856 0);
1822 1857
1823 hci_dev_unlock_bh(hdev); 1858 hci_dev_unlock(hdev);
1824 hci_dev_put(hdev); 1859 hci_dev_put(hdev);
1825 1860
1826 return err; 1861 return err;
@@ -1844,7 +1879,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1844 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1879 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1845 MGMT_STATUS_INVALID_PARAMS); 1880 MGMT_STATUS_INVALID_PARAMS);
1846 1881
1847 hci_dev_lock_bh(hdev); 1882 hci_dev_lock(hdev);
1848 1883
1849 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 1884 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1850 if (err < 0) 1885 if (err < 0)
@@ -1854,7 +1889,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1854 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1889 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1855 NULL, 0); 1890 NULL, 0);
1856 1891
1857 hci_dev_unlock_bh(hdev); 1892 hci_dev_unlock(hdev);
1858 hci_dev_put(hdev); 1893 hci_dev_put(hdev);
1859 1894
1860 return err; 1895 return err;
@@ -1879,7 +1914,7 @@ static int start_discovery(struct sock *sk, u16 index,
1879 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, 1914 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
1880 MGMT_STATUS_INVALID_PARAMS); 1915 MGMT_STATUS_INVALID_PARAMS);
1881 1916
1882 hci_dev_lock_bh(hdev); 1917 hci_dev_lock(hdev);
1883 1918
1884 if (!test_bit(HCI_UP, &hdev->flags)) { 1919 if (!test_bit(HCI_UP, &hdev->flags)) {
1885 err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, 1920 err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
@@ -1898,7 +1933,7 @@ static int start_discovery(struct sock *sk, u16 index,
1898 mgmt_pending_remove(cmd); 1933 mgmt_pending_remove(cmd);
1899 1934
1900failed: 1935failed:
1901 hci_dev_unlock_bh(hdev); 1936 hci_dev_unlock(hdev);
1902 hci_dev_put(hdev); 1937 hci_dev_put(hdev);
1903 1938
1904 return err; 1939 return err;
@@ -1917,7 +1952,7 @@ static int stop_discovery(struct sock *sk, u16 index)
1917 return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, 1952 return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
1918 MGMT_STATUS_INVALID_PARAMS); 1953 MGMT_STATUS_INVALID_PARAMS);
1919 1954
1920 hci_dev_lock_bh(hdev); 1955 hci_dev_lock(hdev);
1921 1956
1922 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); 1957 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
1923 if (!cmd) { 1958 if (!cmd) {
@@ -1930,7 +1965,7 @@ static int stop_discovery(struct sock *sk, u16 index)
1930 mgmt_pending_remove(cmd); 1965 mgmt_pending_remove(cmd);
1931 1966
1932failed: 1967failed:
1933 hci_dev_unlock_bh(hdev); 1968 hci_dev_unlock(hdev);
1934 hci_dev_put(hdev); 1969 hci_dev_put(hdev);
1935 1970
1936 return err; 1971 return err;
@@ -1954,7 +1989,7 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1954 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 1989 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1955 MGMT_STATUS_INVALID_PARAMS); 1990 MGMT_STATUS_INVALID_PARAMS);
1956 1991
1957 hci_dev_lock_bh(hdev); 1992 hci_dev_lock(hdev);
1958 1993
1959 err = hci_blacklist_add(hdev, &cp->bdaddr); 1994 err = hci_blacklist_add(hdev, &cp->bdaddr);
1960 if (err < 0) 1995 if (err < 0)
@@ -1964,7 +1999,7 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
1964 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 1999 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1965 NULL, 0); 2000 NULL, 0);
1966 2001
1967 hci_dev_unlock_bh(hdev); 2002 hci_dev_unlock(hdev);
1968 hci_dev_put(hdev); 2003 hci_dev_put(hdev);
1969 2004
1970 return err; 2005 return err;
@@ -1988,7 +2023,7 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1988 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2023 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1989 MGMT_STATUS_INVALID_PARAMS); 2024 MGMT_STATUS_INVALID_PARAMS);
1990 2025
1991 hci_dev_lock_bh(hdev); 2026 hci_dev_lock(hdev);
1992 2027
1993 err = hci_blacklist_del(hdev, &cp->bdaddr); 2028 err = hci_blacklist_del(hdev, &cp->bdaddr);
1994 2029
@@ -1999,7 +2034,7 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1999 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2034 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
2000 NULL, 0); 2035 NULL, 0);
2001 2036
2002 hci_dev_unlock_bh(hdev); 2037 hci_dev_unlock(hdev);
2003 hci_dev_put(hdev); 2038 hci_dev_put(hdev);
2004 2039
2005 return err; 2040 return err;
@@ -2009,7 +2044,7 @@ static int set_fast_connectable(struct sock *sk, u16 index,
2009 unsigned char *data, u16 len) 2044 unsigned char *data, u16 len)
2010{ 2045{
2011 struct hci_dev *hdev; 2046 struct hci_dev *hdev;
2012 struct mgmt_cp_set_fast_connectable *cp = (void *) data; 2047 struct mgmt_mode *cp = (void *) data;
2013 struct hci_cp_write_page_scan_activity acp; 2048 struct hci_cp_write_page_scan_activity acp;
2014 u8 type; 2049 u8 type;
2015 int err; 2050 int err;
@@ -2027,7 +2062,7 @@ static int set_fast_connectable(struct sock *sk, u16 index,
2027 2062
2028 hci_dev_lock(hdev); 2063 hci_dev_lock(hdev);
2029 2064
2030 if (cp->enable) { 2065 if (cp->val) {
2031 type = PAGE_SCAN_TYPE_INTERLACED; 2066 type = PAGE_SCAN_TYPE_INTERLACED;
2032 acp.interval = 0x0024; /* 22.5 msec page scan interval */ 2067 acp.interval = 0x0024; /* 22.5 msec page scan interval */
2033 } else { 2068 } else {
@@ -2111,6 +2146,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2111 case MGMT_OP_SET_CONNECTABLE: 2146 case MGMT_OP_SET_CONNECTABLE:
2112 err = set_connectable(sk, index, buf + sizeof(*hdr), len); 2147 err = set_connectable(sk, index, buf + sizeof(*hdr), len);
2113 break; 2148 break;
2149 case MGMT_OP_SET_FAST_CONNECTABLE:
2150 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
2151 len);
2152 break;
2114 case MGMT_OP_SET_PAIRABLE: 2153 case MGMT_OP_SET_PAIRABLE:
2115 err = set_pairable(sk, index, buf + sizeof(*hdr), len); 2154 err = set_pairable(sk, index, buf + sizeof(*hdr), len);
2116 break; 2155 break;
@@ -2123,9 +2162,6 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2123 case MGMT_OP_SET_DEV_CLASS: 2162 case MGMT_OP_SET_DEV_CLASS:
2124 err = set_dev_class(sk, index, buf + sizeof(*hdr), len); 2163 err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
2125 break; 2164 break;
2126 case MGMT_OP_SET_SERVICE_CACHE:
2127 err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
2128 break;
2129 case MGMT_OP_LOAD_LINK_KEYS: 2165 case MGMT_OP_LOAD_LINK_KEYS:
2130 err = load_link_keys(sk, index, buf + sizeof(*hdr), len); 2166 err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
2131 break; 2167 break;
@@ -2189,10 +2225,6 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2189 case MGMT_OP_UNBLOCK_DEVICE: 2225 case MGMT_OP_UNBLOCK_DEVICE:
2190 err = unblock_device(sk, index, buf + sizeof(*hdr), len); 2226 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
2191 break; 2227 break;
2192 case MGMT_OP_SET_FAST_CONNECTABLE:
2193 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
2194 len);
2195 break;
2196 default: 2228 default:
2197 BT_DBG("Unknown op %u", opcode); 2229 BT_DBG("Unknown op %u", opcode);
2198 err = cmd_status(sk, index, opcode, 2230 err = cmd_status(sk, index, opcode,
@@ -2235,17 +2267,14 @@ int mgmt_index_removed(struct hci_dev *hdev)
2235struct cmd_lookup { 2267struct cmd_lookup {
2236 u8 val; 2268 u8 val;
2237 struct sock *sk; 2269 struct sock *sk;
2270 struct hci_dev *hdev;
2238}; 2271};
2239 2272
2240static void mode_rsp(struct pending_cmd *cmd, void *data) 2273static void settings_rsp(struct pending_cmd *cmd, void *data)
2241{ 2274{
2242 struct mgmt_mode *cp = cmd->param;
2243 struct cmd_lookup *match = data; 2275 struct cmd_lookup *match = data;
2244 2276
2245 if (cp->val != match->val) 2277 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
2246 return;
2247
2248 send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
2249 2278
2250 list_del(&cmd->list); 2279 list_del(&cmd->list);
2251 2280
@@ -2259,20 +2288,21 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
2259 2288
2260int mgmt_powered(struct hci_dev *hdev, u8 powered) 2289int mgmt_powered(struct hci_dev *hdev, u8 powered)
2261{ 2290{
2262 struct mgmt_mode ev; 2291 struct cmd_lookup match = { powered, NULL, hdev };
2263 struct cmd_lookup match = { powered, NULL }; 2292 __le32 ev;
2264 int ret; 2293 int ret;
2265 2294
2266 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, mode_rsp, &match); 2295 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2267 2296
2268 if (!powered) { 2297 if (!powered) {
2269 u8 status = ENETDOWN; 2298 u8 status = ENETDOWN;
2270 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2299 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2271 } 2300 }
2272 2301
2273 ev.val = powered; 2302 ev = cpu_to_le32(get_current_settings(hdev));
2274 2303
2275 ret = mgmt_event(MGMT_EV_POWERED, hdev, &ev, sizeof(ev), match.sk); 2304 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2305 match.sk);
2276 2306
2277 if (match.sk) 2307 if (match.sk)
2278 sock_put(match.sk); 2308 sock_put(match.sk);
@@ -2282,17 +2312,16 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2282 2312
2283int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 2313int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2284{ 2314{
2285 struct mgmt_mode ev; 2315 struct cmd_lookup match = { discoverable, NULL, hdev };
2286 struct cmd_lookup match = { discoverable, NULL }; 2316 __le32 ev;
2287 int ret; 2317 int ret;
2288 2318
2289 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, mode_rsp, &match); 2319 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match);
2290 2320
2291 ev.val = discoverable; 2321 ev = cpu_to_le32(get_current_settings(hdev));
2292 2322
2293 ret = mgmt_event(MGMT_EV_DISCOVERABLE, hdev, &ev, sizeof(ev), 2323 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2294 match.sk); 2324 match.sk);
2295
2296 if (match.sk) 2325 if (match.sk)
2297 sock_put(match.sk); 2326 sock_put(match.sk);
2298 2327
@@ -2301,15 +2330,16 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2301 2330
2302int mgmt_connectable(struct hci_dev *hdev, u8 connectable) 2331int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2303{ 2332{
2304 struct mgmt_mode ev; 2333 __le32 ev;
2305 struct cmd_lookup match = { connectable, NULL }; 2334 struct cmd_lookup match = { connectable, NULL, hdev };
2306 int ret; 2335 int ret;
2307 2336
2308 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, mode_rsp, &match); 2337 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
2338 &match);
2309 2339
2310 ev.val = connectable; 2340 ev = cpu_to_le32(get_current_settings(hdev));
2311 2341
2312 ret = mgmt_event(MGMT_EV_CONNECTABLE, hdev, &ev, sizeof(ev), match.sk); 2342 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk);
2313 2343
2314 if (match.sk) 2344 if (match.sk)
2315 sock_put(match.sk); 2345 sock_put(match.sk);
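
The three hunks above retire the per-mode events (MGMT_EV_POWERED, MGMT_EV_DISCOVERABLE, MGMT_EV_CONNECTABLE) in favour of a single MGMT_EV_NEW_SETTINGS whose payload is one little-endian 32-bit word built by cpu_to_le32(get_current_settings(hdev)); settings_rsp likewise answers every pending command with the full settings word via the hdev pointer now carried in struct cmd_lookup. A sketch of how a management client might decode such a payload; the bit positions are illustrative assumptions, so consult the matching kernel's include/net/bluetooth/mgmt.h for the real MGMT_SETTING_* values:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit assignments, for illustration only. */
#define DEMO_SETTING_POWERED		0x00000001
#define DEMO_SETTING_CONNECTABLE	0x00000002
#define DEMO_SETTING_DISCOVERABLE	0x00000008

static void decode_new_settings(const uint8_t ev[4])
{
	/* The payload is little-endian on the wire, whatever the host order. */
	uint32_t s = (uint32_t)ev[0] | (uint32_t)ev[1] << 8 |
		     (uint32_t)ev[2] << 16 | (uint32_t)ev[3] << 24;

	printf("powered=%d connectable=%d discoverable=%d\n",
	       !!(s & DEMO_SETTING_POWERED),
	       !!(s & DEMO_SETTING_CONNECTABLE),
	       !!(s & DEMO_SETTING_DISCOVERABLE));
}
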
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8743f369ed3f..be6288cf854a 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1162,6 +1162,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1162 if (list_empty(&s->dlcs)) { 1162 if (list_empty(&s->dlcs)) {
1163 s->state = BT_DISCONN; 1163 s->state = BT_DISCONN;
1164 rfcomm_send_disc(s, 0); 1164 rfcomm_send_disc(s, 0);
1165 rfcomm_session_clear_timer(s);
1165 } 1166 }
1166 1167
1167 break; 1168 break;
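
The rfcomm change adds a rfcomm_session_clear_timer() call on the path where the last DLC is gone and the session enters BT_DISCONN and sends DISC, presumably so the session watchdog cannot fire and tear the session down concurrently with the explicit disconnect exchange. Schematically, with a made-up session type rather than rfcomm's:

#include <linux/timer.h>

struct demo_session {
	int state;
	struct timer_list timer;	/* idle/retry watchdog */
};

static void demo_start_disconnect(struct demo_session *s)
{
	s->state = 1;			/* stand-in for BT_DISCONN */
	/* ... queue the DISC frame ... */

	/* Stop the watchdog so its handler cannot run (or keep
	 * running) against a session already being torn down. */
	del_timer_sync(&s->timer);
}
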
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a324b009e34b..725e10d487f2 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
189 if (!hdev) 189 if (!hdev)
190 return -EHOSTUNREACH; 190 return -EHOSTUNREACH;
191 191
192 hci_dev_lock_bh(hdev); 192 hci_dev_lock(hdev);
193 193
194 if (lmp_esco_capable(hdev) && !disable_esco) 194 if (lmp_esco_capable(hdev) && !disable_esco)
195 type = ESCO_LINK; 195 type = ESCO_LINK;
@@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk)
225 } 225 }
226 226
227done: 227done:
228 hci_dev_unlock_bh(hdev); 228 hci_dev_unlock(hdev);
229 hci_dev_put(hdev); 229 hci_dev_put(hdev);
230 return err; 230 return err;
231} 231}
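
Finally, the recurring substitution across mgmt.c and sco.c swaps every hci_dev_lock_bh()/hci_dev_unlock_bh() pair for plain hci_dev_lock()/hci_dev_unlock(). That matches handlers running in process context (workqueues) rather than in bottom halves: disabling softirqs around the critical section becomes unnecessary, and a sleeping lock becomes legal, which the new cancel_delayed_work_sync() call in set_dev_class() already presupposes. A schematic contrast, assuming stand-in locks rather than the kernel's actual hci definitions:

#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(demo_spin);
static DEFINE_MUTEX(demo_mutex);

static void bh_context_handler(void)
{
	spin_lock_bh(&demo_spin);	/* also masks local softirqs */
	/* ... shared state; must not sleep here ... */
	spin_unlock_bh(&demo_spin);
}

static void process_context_handler(void)
{
	mutex_lock(&demo_mutex);	/* sleeping is fine in process context */
	/* ... shared state; may sleep, e.g. wait for pending work ... */
	mutex_unlock(&demo_mutex);
}
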