Diffstat (limited to 'net/bluetooth/hci_core.c')
 -rw-r--r--  net/bluetooth/hci_core.c | 399
 1 files changed, 303 insertions, 96 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae3..fb3feeb185d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -319,8 +319,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-	struct hci_dev *hdev = NULL;
-	struct list_head *p;
+	struct hci_dev *hdev = NULL, *d;
 
 	BT_DBG("%d", index);
 
@@ -328,8 +327,7 @@ struct hci_dev *hci_dev_get(int index)
 		return NULL;
 
 	read_lock(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *d = list_entry(p, struct hci_dev, list);
+	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
 			break;
@@ -551,8 +549,11 @@ int hci_dev_open(__u16 dev)
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->flags))
-			mgmt_powered(hdev->id, 1);
+		if (!test_bit(HCI_SETUP, &hdev->flags)) {
+			hci_dev_lock_bh(hdev);
+			mgmt_powered(hdev, 1);
+			hci_dev_unlock_bh(hdev);
+		}
 	} else {
 		/* Init failed, cleanup */
 		tasklet_kill(&hdev->rx_task);
@@ -597,6 +598,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	tasklet_kill(&hdev->rx_task);
 	tasklet_kill(&hdev->tx_task);
 
+	if (hdev->discov_timeout > 0) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = 0;
+	}
+
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work(&hdev->power_off);
+
 	hci_dev_lock_bh(hdev);
 	inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
@@ -636,7 +645,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	mgmt_powered(hdev->id, 0);
+	hci_dev_lock_bh(hdev);
+	mgmt_powered(hdev, 0);
+	hci_dev_unlock_bh(hdev);
 
 	/* Clear flags */
 	hdev->flags = 0;
@@ -794,9 +805,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 int hci_get_dev_list(void __user *arg)
 {
+	struct hci_dev *hdev;
 	struct hci_dev_list_req *dl;
 	struct hci_dev_req *dr;
-	struct list_head *p;
 	int n = 0, size, err;
 	__u16 dev_num;
 
@@ -815,12 +826,9 @@ int hci_get_dev_list(void __user *arg)
 	dr = dl->dev_req;
 
 	read_lock_bh(&hci_dev_list_lock);
-	list_for_each(p, &hci_dev_list) {
-		struct hci_dev *hdev;
-
-		hdev = list_entry(p, struct hci_dev, list);
-
-		hci_del_off_timer(hdev);
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+			cancel_delayed_work(&hdev->power_off);
 
 		if (!test_bit(HCI_MGMT, &hdev->flags))
 			set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +863,8 @@ int hci_get_dev_info(void __user *arg)
 	if (!hdev)
 		return -ENODEV;
 
-	hci_del_off_timer(hdev);
+	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+		cancel_delayed_work_sync(&hdev->power_off);
 
 	if (!test_bit(HCI_MGMT, &hdev->flags))
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +921,7 @@ struct hci_dev *hci_alloc_dev(void)
 	if (!hdev)
 		return NULL;
 
+	hci_init_sysfs(hdev);
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
@@ -938,39 +948,41 @@ static void hci_power_on(struct work_struct *work)
 		return;
 
 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-		mod_timer(&hdev->off_timer,
-			jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+		queue_delayed_work(hdev->workqueue, &hdev->power_off,
+					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_added(hdev->id);
+		mgmt_index_added(hdev);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+							power_off.work);
 
 	BT_DBG("%s", hdev->name);
 
+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
 	hci_dev_close(hdev->id);
 }
 
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = (struct hci_dev *) data;
+	struct hci_dev *hdev;
+	u8 scan = SCAN_PAGE;
+
+	hdev = container_of(work, struct hci_dev, discov_off.work);
 
 	BT_DBG("%s", hdev->name);
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+	hci_dev_lock_bh(hdev);
 
-	queue_work(hdev->workqueue, &hdev->power_off);
-}
+	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
-void hci_del_off_timer(struct hci_dev *hdev)
-{
-	BT_DBG("%s", hdev->name);
+	hdev->discov_timeout = 0;
 
-	clear_bit(HCI_AUTO_OFF, &hdev->flags);
-	del_timer(&hdev->off_timer);
+	hci_dev_unlock_bh(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1019,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
 
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->link_keys) {
-		struct link_key *k;
-
-		k = list_entry(p, struct link_key, list);
+	struct link_key *k;
 
+	list_for_each_entry(k, &hdev->link_keys, list)
 		if (bacmp(bdaddr, &k->bdaddr) == 0)
 			return k;
-	}
 
 	return NULL;
 }
@@ -1138,7 +1145,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
 
-	mgmt_new_key(hdev->id, key, persistent);
+	mgmt_new_link_key(hdev, key, persistent);
 
 	if (!persistent) {
 		list_del(&key->list);
@@ -1181,7 +1188,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
 	memcpy(id->rand, rand, sizeof(id->rand));
 
 	if (new_key)
-		mgmt_new_key(hdev->id, key, old_key_type);
+		mgmt_new_link_key(hdev, key, old_key_type);
 
 	return 0;
 }
@@ -1279,16 +1286,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
 						bdaddr_t *bdaddr)
 {
-	struct list_head *p;
-
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
+	struct bdaddr_list *b;
 
+	list_for_each_entry(b, &hdev->blacklist, list)
 		if (bacmp(bdaddr, &b->bdaddr) == 0)
 			return b;
-	}
 
 	return NULL;
 }
@@ -1327,7 +1329,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
 	list_add(&entry->list, &hdev->blacklist);
 
-	return mgmt_device_blocked(hdev->id, bdaddr);
+	return mgmt_device_blocked(hdev, bdaddr);
 }
 
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -1346,7 +1348,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	list_del(&entry->list);
 	kfree(entry);
 
-	return mgmt_device_unblocked(hdev->id, bdaddr);
+	return mgmt_device_unblocked(hdev, bdaddr);
 }
 
 static void hci_clear_adv_cache(unsigned long arg)
@@ -1425,7 +1427,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
 int hci_register_dev(struct hci_dev *hdev)
 {
 	struct list_head *head = &hci_dev_list, *p;
-	int i, id = 0;
+	int i, id, error;
 
 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
 						hdev->bus, hdev->owner);
@@ -1433,6 +1435,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
 
+	/* Do not allow HCI_AMP devices to register at index 0,
+	 * so the index can be used as the AMP controller ID.
+	 */
+	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
 	write_lock_bh(&hci_dev_list_lock);
 
 	/* Find first available device id */
@@ -1479,6 +1486,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	hci_conn_hash_init(hdev);
 
+	INIT_LIST_HEAD(&hdev->mgmt_pending);
+
 	INIT_LIST_HEAD(&hdev->blacklist);
 
 	INIT_LIST_HEAD(&hdev->uuids);
@@ -1492,8 +1501,9 @@ int hci_register_dev(struct hci_dev *hdev)
 						(unsigned long) hdev);
 
 	INIT_WORK(&hdev->power_on, hci_power_on);
-	INIT_WORK(&hdev->power_off, hci_power_off);
-	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
@@ -1502,10 +1512,14 @@ int hci_register_dev(struct hci_dev *hdev)
 	write_unlock_bh(&hci_dev_list_lock);
 
 	hdev->workqueue = create_singlethread_workqueue(hdev->name);
-	if (!hdev->workqueue)
-		goto nomem;
+	if (!hdev->workqueue) {
+		error = -ENOMEM;
+		goto err;
+	}
 
-	hci_register_sysfs(hdev);
+	error = hci_add_sysfs(hdev);
+	if (error < 0)
+		goto err_wqueue;
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1524,17 +1538,19 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	return id;
 
-nomem:
+err_wqueue:
+	destroy_workqueue(hdev->workqueue);
+err:
 	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock_bh(&hci_dev_list_lock);
 
-	return -ENOMEM;
+	return error;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
 	int i;
 
@@ -1550,8 +1566,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
 		kfree_skb(hdev->reassembly[i]);
 
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
-					!test_bit(HCI_SETUP, &hdev->flags))
-		mgmt_index_removed(hdev->id);
+					!test_bit(HCI_SETUP, &hdev->flags)) {
+		hci_dev_lock_bh(hdev);
+		mgmt_index_removed(hdev);
+		hci_dev_unlock_bh(hdev);
+	}
+
+	/* mgmt_index_removed should take care of emptying the
+	 * pending list */
+	BUG_ON(!list_empty(&hdev->mgmt_pending));
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1560,9 +1583,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
 		rfkill_destroy(hdev->rfkill);
 	}
 
-	hci_unregister_sysfs(hdev);
+	hci_del_sysfs(hdev);
 
-	hci_del_off_timer(hdev);
 	del_timer(&hdev->adv_timer);
 
 	destroy_workqueue(hdev->workqueue);
@@ -1576,8 +1598,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
-
-	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1948,23 +1968,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
 	hdr->dlen = cpu_to_le16(len);
 }
 
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+				struct sk_buff *skb, __u16 flags)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;
 
-	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-	skb->dev = (void *) hdev;
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
-
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-		skb_queue_tail(&conn->data_q, skb);
+		skb_queue_tail(queue, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1987,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 		skb_shinfo(skb)->frag_list = NULL;
 
 		/* Queue all fragments atomically */
-		spin_lock_bh(&conn->data_q.lock);
+		spin_lock_bh(&queue->lock);
 
-		__skb_queue_tail(&conn->data_q, skb);
+		__skb_queue_tail(queue, skb);
 
 		flags &= ~ACL_START;
 		flags |= ACL_CONT;
@@ -1987,11 +2002,25 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-			__skb_queue_tail(&conn->data_q, skb);
+			__skb_queue_tail(queue, skb);
 		} while (list);
 
-		spin_unlock_bh(&conn->data_q.lock);
+		spin_unlock_bh(&queue->lock);
 	}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+	struct hci_conn *conn = chan->conn;
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+	skb->dev = (void *) hdev;
+	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	hci_add_acl_hdr(skb, conn->handle, flags);
+
+	hci_queue_acl(conn, &chan->data_q, skb, flags);
 
 	tasklet_schedule(&hdev->tx_task);
 }
@@ -2026,16 +2055,12 @@ EXPORT_SYMBOL(hci_send_sco);
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn = NULL;
+	struct hci_conn *conn = NULL, *c;
 	int num = 0, min = ~0;
-	struct list_head *p;
 
 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
-	list_for_each(p, &h->list) {
-		struct hci_conn *c;
-		c = list_entry(p, struct hci_conn, list);
-
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;
 
@@ -2084,14 +2109,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct list_head *p;
-	struct hci_conn *c;
+	struct hci_conn *c;
 
 	BT_ERR("%s link tx timeout", hdev->name);
 
 	/* Kill stalled connections */
-	list_for_each(p, &h->list) {
-		c = list_entry(p, struct hci_conn, list);
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 					hdev->name, batostr(&c->dst));
@@ -2100,11 +2123,137 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 	}
 }
 
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+						int *quote)
 {
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_chan *chan = NULL;
+	int num = 0, min = ~0, cur_prio = 0;
 	struct hci_conn *conn;
+	int cnt, q, conn_num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *tmp;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		conn_num++;
+
+		ch = &conn->chan_hash;
+
+		list_for_each_entry(tmp, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (skb_queue_empty(&tmp->data_q))
+				continue;
+
+			skb = skb_peek(&tmp->data_q);
+			if (skb->priority < cur_prio)
+				continue;
+
+			if (skb->priority > cur_prio) {
+				num = 0;
+				min = ~0;
+				cur_prio = skb->priority;
+			}
+
+			num++;
+
+			if (conn->sent < min) {
+				min = conn->sent;
+				chan = tmp;
+			}
+		}
+
+		if (hci_conn_num(hdev, type) == conn_num)
+			break;
+	}
+
+	if (!chan)
+		return NULL;
+
+	switch (chan->conn->type) {
+	case ACL_LINK:
+		cnt = hdev->acl_cnt;
+		break;
+	case SCO_LINK:
+	case ESCO_LINK:
+		cnt = hdev->sco_cnt;
+		break;
+	case LE_LINK:
+		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+		break;
+	default:
+		cnt = 0;
+		BT_ERR("Unknown link type");
+	}
+
+	q = cnt / num;
+	*quote = q ? q : 1;
+	BT_DBG("chan %p quote %d", chan, *quote);
+	return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn;
+	int num = 0;
+
+	BT_DBG("%s", hdev->name);
+
+	list_for_each_entry(conn, &h->list, list) {
+		struct hci_chan_hash *ch;
+		struct hci_chan *chan;
+
+		if (conn->type != type)
+			continue;
+
+		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		ch = &conn->chan_hash;
+		list_for_each_entry(chan, &ch->list, list) {
+			struct sk_buff *skb;
+
+			if (chan->sent) {
+				chan->sent = 0;
+				continue;
+			}
+
+			if (skb_queue_empty(&chan->data_q))
+				continue;
+
+			skb = skb_peek(&chan->data_q);
+			if (skb->priority >= HCI_PRIO_MAX - 1)
+				continue;
+
+			skb->priority = HCI_PRIO_MAX - 1;
+
+			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+								skb->priority);
+		}
+
+		if (hci_conn_num(hdev, type) == num)
+			break;
+	}
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
+	unsigned int cnt;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2118,19 +2267,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 		hci_link_tx_to(hdev, ACL_LINK);
 	}
 
-	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	cnt = hdev->acl_cnt;
+
+	while (hdev->acl_cnt &&
+			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
 
-			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
+
+			hci_conn_enter_active_mode(chan->conn,
+						bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
 
 			hdev->acl_cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
+	if (cnt != hdev->acl_cnt)
+		hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 /* Schedule SCO */
@@ -2182,9 +2347,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-	struct hci_conn *conn;
+	struct hci_chan *chan;
 	struct sk_buff *skb;
-	int quote, cnt;
+	int quote, cnt, tmp;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2200,21 +2365,35 @@ static inline void hci_sched_le(struct hci_dev *hdev)
 	}
 
 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
+	tmp = cnt;
+	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+		u32 priority = (skb_peek(&chan->data_q))->priority;
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+					skb->len, skb->priority);
+
+			/* Stop if priority has changed */
+			if (skb->priority < priority)
+				break;
+
+			skb = skb_dequeue(&chan->data_q);
 
 			hci_send_frame(skb);
 			hdev->le_last_tx = jiffies;
 
 			cnt--;
-			conn->sent++;
+			chan->sent++;
+			chan->conn->sent++;
 		}
 	}
+
 	if (hdev->le_pkts)
 		hdev->le_cnt = cnt;
 	else
 		hdev->acl_cnt = cnt;
+
+	if (cnt != tmp)
+		hci_prio_recalculate(hdev, LE_LINK);
 }
 
 static void hci_tx_task(unsigned long arg)
@@ -2407,3 +2586,31 @@ static void hci_cmd_task(unsigned long arg)
 		}
 	}
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+	/* General inquiry access code (GIAC) */
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+
+	BT_DBG("%s", hdev->name);
+
+	if (test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EINPROGRESS;
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, sizeof(cp.lap));
+	cp.length = length;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		return -EPERM;
+
+	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}