Diffstat (limited to 'net/bluetooth/mgmt.c')
 net/bluetooth/mgmt.c | 637 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 457 insertions(+), 180 deletions(-)
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 861e389f4b4c..074d83690a41 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -536,6 +536,156 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
 	return ptr;
 }
 
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+
+	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+		if (cmd->opcode == opcode)
+			return cmd;
+	}
+
+	return NULL;
+}
+
+static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+	u8 ad_len = 0;
+	size_t name_len;
+
+	name_len = strlen(hdev->dev_name);
+	if (name_len > 0) {
+		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+		if (name_len > max_len) {
+			name_len = max_len;
+			ptr[1] = EIR_NAME_SHORT;
+		} else
+			ptr[1] = EIR_NAME_COMPLETE;
+
+		ptr[0] = name_len + 1;
+
+		memcpy(ptr + 2, hdev->dev_name, name_len);
+
+		ad_len += (name_len + 2);
+		ptr += (name_len + 2);
+	}
+
+	return ad_len;
+}
+
+static void update_scan_rsp_data(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_scan_rsp_data cp;
+	u8 len;
+
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = create_scan_rsp_data(hdev, cp.data);
+
+	if (hdev->scan_rsp_data_len == len &&
+	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+		return;
+
+	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+	hdev->scan_rsp_data_len = len;
+
+	cp.length = len;
+
+	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 get_adv_discov_flags(struct hci_dev *hdev)
+{
+	struct pending_cmd *cmd;
+
+	/* If there's a pending mgmt command the flags will not yet have
+	 * their final values, so check for this first.
+	 */
+	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+	if (cmd) {
+		struct mgmt_mode *cp = cmd->param;
+		if (cp->val == 0x01)
+			return LE_AD_GENERAL;
+		else if (cp->val == 0x02)
+			return LE_AD_LIMITED;
+	} else {
+		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+			return LE_AD_LIMITED;
+		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+			return LE_AD_GENERAL;
+	}
+
+	return 0;
+}
+
+static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+{
+	u8 ad_len = 0, flags = 0;
+
+	flags |= get_adv_discov_flags(hdev);
+
+	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		if (lmp_le_br_capable(hdev))
+			flags |= LE_AD_SIM_LE_BREDR_CTRL;
+		if (lmp_host_le_br_capable(hdev))
+			flags |= LE_AD_SIM_LE_BREDR_HOST;
+	} else {
+		flags |= LE_AD_NO_BREDR;
+	}
+
+	if (flags) {
+		BT_DBG("adv flags 0x%02x", flags);
+
+		ptr[0] = 2;
+		ptr[1] = EIR_FLAGS;
+		ptr[2] = flags;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+		ptr[0] = 2;
+		ptr[1] = EIR_TX_POWER;
+		ptr[2] = (u8) hdev->adv_tx_power;
+
+		ad_len += 3;
+		ptr += 3;
+	}
+
+	return ad_len;
+}
+
+static void update_adv_data(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_adv_data cp;
+	u8 len;
+
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+		return;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = create_adv_data(hdev, cp.data);
+
+	if (hdev->adv_data_len == len &&
+	    memcmp(cp.data, hdev->adv_data, len) == 0)
+		return;
+
+	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+	hdev->adv_data_len = len;
+
+	cp.length = len;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
 	u8 *ptr = data;
@@ -634,6 +784,9 @@ static void update_class(struct hci_request *req)
 	if (!hdev_is_powered(hdev))
 		return;
 
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return;
+
 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
 		return;
 
@@ -641,6 +794,9 @@ static void update_class(struct hci_request *req)
 	cod[1] = hdev->major_class;
 	cod[2] = get_service_classes(hdev);
 
+	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+		cod[1] |= 0x20;
+
 	if (memcmp(cod, hdev->dev_class, 3) == 0)
 		return;
 
@@ -765,18 +921,6 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
 	}
 }
 
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
-{
-	struct pending_cmd *cmd;
-
-	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
-		if (cmd->opcode == opcode)
-			return cmd;
-	}
-
-	return NULL;
-}
-
 static void mgmt_pending_remove(struct pending_cmd *cmd)
 {
 	list_del(&cmd->list);
@@ -939,6 +1083,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_mode *cp;
+	struct hci_request req;
 	bool changed;
 
 	BT_DBG("status 0x%02x", status);
@@ -952,22 +1097,38 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
 	if (status) {
 		u8 mgmt_err = mgmt_status(status);
 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
 		goto remove_cmd;
 	}
 
 	cp = cmd->param;
-	if (cp->val)
+	if (cp->val) {
 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
 					    &hdev->dev_flags);
-	else
+
+		if (hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+					   to);
+		}
+	} else {
 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
 					    &hdev->dev_flags);
+	}
 
 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
 
 	if (changed)
 		new_settings(hdev, cmd->sk);
 
+	/* When the discoverable mode gets changed, make sure
+	 * that class of device has the limited discoverable
+	 * bit correctly set.
+	 */
+	hci_req_init(&req, hdev);
+	update_class(&req);
+	hci_req_run(&req, NULL);
+
 remove_cmd:
 	mgmt_pending_remove(cmd);
 
@@ -982,22 +1143,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 	struct pending_cmd *cmd;
 	struct hci_request req;
 	u16 timeout;
-	u8 scan, status;
+	u8 scan;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
-	status = mgmt_bredr_support(hdev);
-	if (status)
+	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-				  status);
+				  MGMT_STATUS_REJECTED);
 
-	if (cp->val != 0x00 && cp->val != 0x01)
+	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				  MGMT_STATUS_INVALID_PARAMS);
 
 	timeout = __le16_to_cpu(cp->timeout);
-	if (!cp->val && timeout > 0)
+
+	/* Disabling discoverable requires that no timeout is set,
+	 * and enabling limited discoverable requires a timeout.
+	 */
+	if ((cp->val == 0x00 && timeout > 0) ||
+	    (cp->val == 0x02 && timeout == 0))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				  MGMT_STATUS_INVALID_PARAMS);
 
@@ -1025,6 +1191,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 	if (!hdev_is_powered(hdev)) {
 		bool changed = false;
 
+		/* Setting limited discoverable when powered off is
+		 * not a valid operation since it requires a timeout
+		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
+		 */
 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 			changed = true;
@@ -1040,16 +1210,20 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 		goto failed;
 	}
 
-	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
-		if (hdev->discov_timeout > 0) {
-			cancel_delayed_work(&hdev->discov_off);
-			hdev->discov_timeout = 0;
-		}
+	/* If the current mode is the same, then just update the timeout
+	 * value with the new value. And if only the timeout gets updated,
+	 * then no need for any HCI transactions.
+	 */
+	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+					  &hdev->dev_flags)) {
+		cancel_delayed_work(&hdev->discov_off);
+		hdev->discov_timeout = timeout;
 
-		if (cp->val && timeout > 0) {
-			hdev->discov_timeout = timeout;
+		if (cp->val && hdev->discov_timeout > 0) {
+			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
-					   msecs_to_jiffies(hdev->discov_timeout * 1000));
+					   to);
 		}
 
 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -1062,24 +1236,66 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 		goto failed;
 	}
 
+	/* Cancel any potential discoverable timeout that might be
+	 * still active and store new timeout value. The arming of
+	 * the timeout happens in the complete handler.
+	 */
+	cancel_delayed_work(&hdev->discov_off);
+	hdev->discov_timeout = timeout;
+
+	/* Limited discoverable mode */
+	if (cp->val == 0x02)
+		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	else
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
 	hci_req_init(&req, hdev);
 
+	/* The procedure for LE-only controllers is much simpler - just
+	 * update the advertising data.
+	 */
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		goto update_ad;
+
 	scan = SCAN_PAGE;
 
-	if (cp->val)
+	if (cp->val) {
+		struct hci_cp_write_current_iac_lap hci_cp;
+
+		if (cp->val == 0x02) {
+			/* Limited discoverable mode */
+			hci_cp.num_iac = 2;
+			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
+			hci_cp.iac_lap[1] = 0x8b;
+			hci_cp.iac_lap[2] = 0x9e;
+			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
+			hci_cp.iac_lap[4] = 0x8b;
+			hci_cp.iac_lap[5] = 0x9e;
+		} else {
+			/* General discoverable mode */
+			hci_cp.num_iac = 1;
+			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
+			hci_cp.iac_lap[1] = 0x8b;
+			hci_cp.iac_lap[2] = 0x9e;
+		}
+
+		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+			    (hci_cp.num_iac * 3) + 1, &hci_cp);
+
 		scan |= SCAN_INQUIRY;
-	else
-		cancel_delayed_work(&hdev->discov_off);
+	} else {
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	}
 
-	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+update_ad:
+	update_adv_data(&req);
 
 	err = hci_req_run(&req, set_discoverable_complete);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
-	if (cp->val)
-		hdev->discov_timeout = timeout;
-
 failed:
 	hci_dev_unlock(hdev);
 	return err;
@@ -1091,6 +1307,9 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
 	struct hci_cp_write_page_scan_activity acp;
 	u8 type;
 
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+		return;
+
 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return;
 
@@ -1146,10 +1365,7 @@ static void enable_advertising(struct hci_request *req)
 	cp.min_interval = __constant_cpu_to_le16(0x0800);
 	cp.max_interval = __constant_cpu_to_le16(0x0800);
 	cp.type = get_adv_type(hdev);
-	if (bacmp(&hdev->bdaddr, BDADDR_ANY))
-		cp.own_address_type = ADDR_LE_DEV_PUBLIC;
-	else
-		cp.own_address_type = ADDR_LE_DEV_RANDOM;
+	cp.own_address_type = hdev->own_addr_type;
 	cp.channel_map = 0x07;
 
 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
@@ -1202,6 +1418,32 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
+static int set_connectable_update_settings(struct hci_dev *hdev,
+					   struct sock *sk, u8 val)
+{
+	bool changed = false;
+	int err;
+
+	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+		changed = true;
+
+	if (val) {
+		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	} else {
+		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+	}
+
+	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+	if (err < 0)
+		return err;
+
+	if (changed)
+		return new_settings(hdev, sk);
+
+	return 0;
+}
+
 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 			   u16 len)
 {
@@ -1225,25 +1467,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 	hci_dev_lock(hdev);
 
 	if (!hdev_is_powered(hdev)) {
-		bool changed = false;
-
-		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-
-		if (cp->val) {
-			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-		} else {
-			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-		}
-
-		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
-		if (err < 0)
-			goto failed;
-
-		if (changed)
-			err = new_settings(hdev, sk);
-
+		err = set_connectable_update_settings(hdev, sk, cp->val);
 		goto failed;
 	}
 
@@ -1262,16 +1486,24 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_req_init(&req, hdev);
 
-	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
-	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
-
+	/* If BR/EDR is not enabled and we disable advertising as a
+	 * by-product of disabling connectable, we need to update the
+	 * advertising flags.
+	 */
+	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		if (!cp->val) {
+			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+		}
+		update_adv_data(&req);
+	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
 		if (cp->val) {
 			scan = SCAN_PAGE;
 		} else {
 			scan = 0;
 
 			if (test_bit(HCI_ISCAN, &hdev->flags) &&
 			    hdev->discov_timeout > 0)
 				cancel_delayed_work(&hdev->discov_off);
 		}
 
@@ -1297,8 +1529,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 	if (err < 0) {
 		mgmt_pending_remove(cmd);
 		if (err == -ENODATA)
-			err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
-						hdev);
+			err = set_connectable_update_settings(hdev, sk,
+							      cp->val);
 		goto failed;
 	}
 
@@ -1556,6 +1788,24 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
 
 	if (match.sk)
 		sock_put(match.sk);
+
+	/* Make sure the controller has a good default for
+	 * advertising data. Restrict the update to when LE
+	 * has actually been enabled. During power on, the
+	 * update in powered_update_hci will take care of it.
+	 */
+	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+		struct hci_request req;
+
+		hci_dev_lock(hdev);
+
+		hci_req_init(&req, hdev);
+		update_adv_data(&req);
+		update_scan_rsp_data(&req);
+		hci_req_run(&req, NULL);
+
+		hci_dev_unlock(hdev);
+	}
 }
 
 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -1623,18 +1873,18 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 		goto unlock;
 	}
 
+	hci_req_init(&req, hdev);
+
 	memset(&hci_cp, 0, sizeof(hci_cp));
 
 	if (val) {
 		hci_cp.le = val;
 		hci_cp.simul = lmp_le_br_capable(hdev);
+	} else {
+		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+			disable_advertising(&req);
 	}
 
-	hci_req_init(&req, hdev);
-
-	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) && !val)
-		disable_advertising(&req);
-
 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
 		    &hci_cp);
 
@@ -2772,8 +3022,11 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
 		update_eir(&req);
 	}
 
+	/* The name is stored in the scan response data and so
+	 * no need to update the advertising data here.
+	 */
 	if (lmp_le_capable(hdev))
-		hci_update_ad(&req);
+		update_scan_rsp_data(&req);
 
 	err = hci_req_run(&req, set_name_complete);
 	if (err < 0)
@@ -3038,10 +3291,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 		param_cp.type = LE_SCAN_ACTIVE;
 		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
 		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
-		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
-			param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
-		else
-			param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
+		param_cp.own_address_type = hdev->own_addr_type;
 		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
 			    &param_cp);
 
@@ -3725,7 +3975,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 		goto unlock;
 	}
 
-	/* We need to flip the bit already here so that hci_update_ad
+	/* We need to flip the bit already here so that update_adv_data
 	 * generates the correct flags.
 	 */
 	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
@@ -3735,7 +3985,10 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
 		set_bredr_scan(&req);
 
-	hci_update_ad(&req);
+	/* Since only the advertising data flags will change, there
+	 * is no need to update the scan response data.
+	 */
+	update_adv_data(&req);
 
 	err = hci_req_run(&req, set_bredr_complete);
 	if (err < 0)
@@ -4036,9 +4289,6 @@ static int powered_update_hci(struct hci_dev *hdev)
 		    cp.simul != lmp_host_le_br_capable(hdev))
 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
 				    sizeof(cp), &cp);
-
-		/* In case BR/EDR was toggled during the AUTO_OFF phase */
-		hci_update_ad(&req);
 	}
 
 	if (lmp_le_capable(hdev)) {
@@ -4047,6 +4297,15 @@ static int powered_update_hci(struct hci_dev *hdev)
 			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
 				    &hdev->static_addr);
 
+		/* Make sure the controller has a good default for
+		 * advertising data. This also applies to the case
+		 * where BR/EDR was toggled during the AUTO_OFF phase.
+		 */
+		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+			update_adv_data(&req);
+			update_scan_rsp_data(&req);
+		}
+
 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 			enable_advertising(&req);
 	}
@@ -4121,59 +4380,91 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
 	mgmt_pending_remove(cmd);
 }
 
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+void mgmt_discoverable_timeout(struct hci_dev *hdev)
 {
-	bool changed = false;
-	int err = 0;
+	struct hci_request req;
+
+	hci_dev_lock(hdev);
+
+	/* When discoverable timeout triggers, then just make sure
+	 * the limited discoverable flag is cleared. Even in the case
+	 * of a timeout triggered from general discoverable, it is
+	 * safe to unconditionally clear the flag.
+	 */
+	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+
+	hci_req_init(&req, hdev);
+	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+		u8 scan = SCAN_PAGE;
+		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
+			    sizeof(scan), &scan);
+	}
+	update_class(&req);
+	update_adv_data(&req);
+	hci_req_run(&req, NULL);
+
+	hdev->discov_timeout = 0;
+
+	new_settings(hdev, NULL);
+
+	hci_dev_unlock(hdev);
+}
+
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+{
+	bool changed;
 
 	/* Nothing needed here if there's a pending command since that
 	 * commands request completion callback takes care of everything
 	 * necessary.
 	 */
 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
-		return 0;
+		return;
 
 	if (discoverable) {
-		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-			changed = true;
+		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 	} else {
-		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-			changed = true;
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
 	}
 
-	if (changed)
-		err = new_settings(hdev, NULL);
+	if (changed) {
+		struct hci_request req;
 
-	return err;
+		/* In case this change in discoverable was triggered by
+		 * a disabling of connectable there could be a need to
+		 * update the advertising flags.
+		 */
+		hci_req_init(&req, hdev);
+		update_adv_data(&req);
+		hci_req_run(&req, NULL);
+
+		new_settings(hdev, NULL);
+	}
 }
 
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
 {
-	bool changed = false;
-	int err = 0;
+	bool changed;
 
 	/* Nothing needed here if there's a pending command since that
 	 * commands request completion callback takes care of everything
 	 * necessary.
 	 */
 	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
-		return 0;
+		return;
 
-	if (connectable) {
-		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-	} else {
-		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-			changed = true;
-	}
+	if (connectable)
+		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
 
 	if (changed)
-		err = new_settings(hdev, NULL);
-
-	return err;
+		new_settings(hdev, NULL);
 }
 
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
 	u8 mgmt_err = mgmt_status(status);
 
@@ -4184,12 +4475,10 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 	if (scan & SCAN_INQUIRY)
 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
 				     cmd_status_rsp, &mgmt_err);
-
-	return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 		      bool persistent)
 {
 	struct mgmt_ev_new_link_key ev;
 
@@ -4202,10 +4491,10 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
 	ev.key.pin_len = key->pin_len;
 
-	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
 {
 	struct mgmt_ev_new_long_term_key ev;
 
@@ -4224,8 +4513,18 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
 	memcpy(ev.key.val, key->val, sizeof(key->val));
 
-	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
-			  NULL);
+	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+				  u8 data_len)
+{
+	eir[eir_len++] = sizeof(type) + data_len;
+	eir[eir_len++] = type;
+	memcpy(&eir[eir_len], data, data_len);
+	eir_len += data_len;
+
+	return eir_len;
 }
 
 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -4345,7 +4644,7 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 {
 	struct mgmt_ev_pin_code_request ev;
 
@@ -4353,52 +4652,45 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 	ev.addr.type = BDADDR_BREDR;
 	ev.secure = secure;
 
-	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
-			  NULL);
+	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 				 u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
-	int err;
 
 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	bacpy(&rp.addr.bdaddr, bdaddr);
 	rp.addr.type = BDADDR_BREDR;
 
-	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
 			   mgmt_status(status), &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 				     u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_pin_code_reply rp;
-	int err;
 
 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	bacpy(&rp.addr.bdaddr, bdaddr);
 	rp.addr.type = BDADDR_BREDR;
 
-	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
 			   mgmt_status(status), &rp, sizeof(rp));
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -4500,8 +4792,8 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 		     u8 addr_type, u8 status)
 {
 	struct mgmt_ev_auth_failed ev;
 
@@ -4509,40 +4801,36 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
 	ev.status = mgmt_status(status);
 
-	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev };
-	bool changed = false;
-	int err = 0;
+	bool changed;
 
 	if (status) {
 		u8 mgmt_err = mgmt_status(status);
 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
 				     cmd_status_rsp, &mgmt_err);
-		return 0;
+		return;
 	}
 
-	if (test_bit(HCI_AUTH, &hdev->flags)) {
-		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-			changed = true;
-	} else {
-		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-			changed = true;
-	}
+	if (test_bit(HCI_AUTH, &hdev->flags))
+		changed = !test_and_set_bit(HCI_LINK_SECURITY,
+					    &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_LINK_SECURITY,
+					     &hdev->dev_flags);
 
 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
 			     &match);
 
 	if (changed)
-		err = new_settings(hdev, match.sk);
+		new_settings(hdev, match.sk);
 
 	if (match.sk)
 		sock_put(match.sk);
-
-	return err;
 }
 
 static void clear_eir(struct hci_request *req)
@@ -4560,12 +4848,11 @@ static void clear_eir(struct hci_request *req)
 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 }
 
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev };
 	struct hci_request req;
 	bool changed = false;
-	int err = 0;
 
 	if (status) {
 		u8 mgmt_err = mgmt_status(status);
@@ -4573,13 +4860,12 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
 						 &hdev->dev_flags)) {
 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
-			err = new_settings(hdev, NULL);
+			new_settings(hdev, NULL);
 		}
 
 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
 				     &mgmt_err);
-
-		return err;
+		return;
 	}
 
 	if (enable) {
@@ -4596,7 +4882,7 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
 
 	if (changed)
-		err = new_settings(hdev, match.sk);
+		new_settings(hdev, match.sk);
 
 	if (match.sk)
 		sock_put(match.sk);
@@ -4609,8 +4895,6 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 		clear_eir(&req);
 
 	hci_req_run(&req, NULL);
-
-	return err;
 }
 
 static void sk_lookup(struct pending_cmd *cmd, void *data)
@@ -4623,33 +4907,30 @@ static void sk_lookup(struct pending_cmd *cmd, void *data)
 	}
 }
 
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
 				   u8 status)
 {
 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
-	int err = 0;
 
 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
 
 	if (!status)
-		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
-				 3, NULL);
+		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
+			   NULL);
 
 	if (match.sk)
 		sock_put(match.sk);
-
-	return err;
 }
 
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
 	struct mgmt_cp_set_local_name ev;
 	struct pending_cmd *cmd;
 
 	if (status)
-		return 0;
+		return;
 
 	memset(&ev, 0, sizeof(ev));
 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
@@ -4663,42 +4944,38 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 		 * HCI dev don't send any mgmt signals.
 		 */
 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-			return 0;
+			return;
 	}
 
-	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
 		   cmd ? cmd->sk : NULL);
 }
 
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
 					    u8 *randomizer, u8 status)
 {
 	struct pending_cmd *cmd;
-	int err;
 
 	BT_DBG("%s status %u", hdev->name, status);
 
 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
 	if (!cmd)
-		return -ENOENT;
+		return;
 
 	if (status) {
-		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
 			   mgmt_status(status));
 	} else {
 		struct mgmt_rp_read_local_oob_data rp;
 
 		memcpy(rp.hash, hash, sizeof(rp.hash));
 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
 
-		err = cmd_complete(cmd->sk, hdev->id,
-				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
-				   sizeof(rp));
+		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+			     0, &rp, sizeof(rp));
 	}
 
 	mgmt_pending_remove(cmd);
-
-	return err;
 }
 
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
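
A note on the IAC LAP bytes hard-coded in the set_discoverable() hunk above: the sequences 0x33 0x8b 0x9e and 0x00 0x8b 0x9e are the little-endian encodings of the General Inquiry Access Code LAP (0x9E8B33) and the Limited Inquiry Access Code LAP (0x9E8B00). The following standalone sketch is not kernel code; the num_iac-plus-3-bytes-per-LAP layout is assumed from the hunk itself, and the snippet simply reproduces the encoding so the literal bytes can be sanity-checked:

#include <stdint.h>
#include <stdio.h>

#define GIAC_LAP 0x9e8b33 /* General Inquiry Access Code */
#define LIAC_LAP 0x9e8b00 /* Limited Inquiry Access Code */

/* Encode a 24-bit LAP as the three little-endian bytes used in the command. */
static void put_lap(uint8_t *buf, uint32_t lap)
{
	buf[0] = lap & 0xff;
	buf[1] = (lap >> 8) & 0xff;
	buf[2] = (lap >> 16) & 0xff;
}

int main(void)
{
	/* Mirrors the limited discoverable case: LIAC first, then GIAC. */
	uint8_t iac_lap[6];
	uint8_t num_iac = 2;
	int i;

	put_lap(&iac_lap[0], LIAC_LAP);
	put_lap(&iac_lap[3], GIAC_LAP);

	printf("num_iac = %u\n", num_iac);
	for (i = 0; i < num_iac * 3; i++)
		printf("iac_lap[%d] = 0x%02x\n", i, iac_lap[i]);

	/* Prints 0x00 0x8b 0x9e (LIAC) followed by 0x33 0x8b 0x9e (GIAC),
	 * matching the bytes written in the patch.
	 */
	return 0;
}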