path: root/net/bluetooth/mgmt.c
Diffstat (limited to 'net/bluetooth/mgmt.c')
-rw-r--r--  net/bluetooth/mgmt.c  1399
1 file changed, 1108 insertions, 291 deletions
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index af8e0a6243b7..b8554d429d88 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
 #include "smp.h"
 
 #define MGMT_VERSION	1
-#define MGMT_REVISION	6
+#define MGMT_REVISION	7
 
 static const u16 mgmt_commands[] = {
 	MGMT_OP_READ_INDEX_LIST,
@@ -44,7 +44,7 @@ static const u16 mgmt_commands[] = {
 	MGMT_OP_SET_DISCOVERABLE,
 	MGMT_OP_SET_CONNECTABLE,
 	MGMT_OP_SET_FAST_CONNECTABLE,
-	MGMT_OP_SET_PAIRABLE,
+	MGMT_OP_SET_BONDABLE,
 	MGMT_OP_SET_LINK_SECURITY,
 	MGMT_OP_SET_SSP,
 	MGMT_OP_SET_HS,
@@ -85,6 +85,14 @@ static const u16 mgmt_commands[] = {
 	MGMT_OP_SET_PRIVACY,
 	MGMT_OP_LOAD_IRKS,
 	MGMT_OP_GET_CONN_INFO,
+	MGMT_OP_GET_CLOCK_INFO,
+	MGMT_OP_ADD_DEVICE,
+	MGMT_OP_REMOVE_DEVICE,
+	MGMT_OP_LOAD_CONN_PARAM,
+	MGMT_OP_READ_UNCONF_INDEX_LIST,
+	MGMT_OP_READ_CONFIG_INFO,
+	MGMT_OP_SET_EXTERNAL_CONFIG,
+	MGMT_OP_SET_PUBLIC_ADDRESS,
 };
 
 static const u16 mgmt_events[] = {
@@ -111,6 +119,12 @@ static const u16 mgmt_events[] = {
 	MGMT_EV_PASSKEY_NOTIFY,
 	MGMT_EV_NEW_IRK,
 	MGMT_EV_NEW_CSRK,
+	MGMT_EV_DEVICE_ADDED,
+	MGMT_EV_DEVICE_REMOVED,
+	MGMT_EV_NEW_CONN_PARAM,
+	MGMT_EV_UNCONF_INDEX_ADDED,
+	MGMT_EV_UNCONF_INDEX_REMOVED,
+	MGMT_EV_NEW_CONFIG_OPTIONS,
 };
 
 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
@@ -200,6 +214,36 @@ static u8 mgmt_status(u8 hci_status)
 	return MGMT_STATUS_FAILED;
 }
 
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
+		      struct sock *skip_sk)
+{
+	struct sk_buff *skb;
+	struct mgmt_hdr *hdr;
+
+	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	hdr = (void *) skb_put(skb, sizeof(*hdr));
+	hdr->opcode = cpu_to_le16(event);
+	if (hdev)
+		hdr->index = cpu_to_le16(hdev->id);
+	else
+		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+	hdr->len = cpu_to_le16(data_len);
+
+	if (data)
+		memcpy(skb_put(skb, data_len), data, data_len);
+
+	/* Time stamp */
+	__net_timestamp(skb);
+
+	hci_send_to_control(skb, skip_sk);
+	kfree_skb(skb);
+
+	return 0;
+}
+
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
 	struct sk_buff *skb;
@@ -327,7 +371,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	count = 0;
 	list_for_each_entry(d, &hci_dev_list, list) {
-		if (d->dev_type == HCI_BREDR)
+		if (d->dev_type == HCI_BREDR &&
+		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
 			count++;
 	}
 
@@ -340,13 +385,19 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	count = 0;
 	list_for_each_entry(d, &hci_dev_list, list) {
-		if (test_bit(HCI_SETUP, &d->dev_flags))
+		if (test_bit(HCI_SETUP, &d->dev_flags) ||
+		    test_bit(HCI_CONFIG, &d->dev_flags) ||
+		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
 			continue;
 
-		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+		/* Devices marked as raw-only are neither configured
+		 * nor unconfigured controllers.
+		 */
+		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
 			continue;
 
-		if (d->dev_type == HCI_BREDR) {
+		if (d->dev_type == HCI_BREDR &&
+		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
 			rp->index[count++] = cpu_to_le16(d->id);
 			BT_DBG("Added hci%u", d->id);
 		}
@@ -365,19 +416,151 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 	return err;
 }
 
+static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+				  void *data, u16 data_len)
+{
+	struct mgmt_rp_read_unconf_index_list *rp;
+	struct hci_dev *d;
+	size_t rp_len;
+	u16 count;
+	int err;
+
+	BT_DBG("sock %p", sk);
+
+	read_lock(&hci_dev_list_lock);
+
+	count = 0;
+	list_for_each_entry(d, &hci_dev_list, list) {
+		if (d->dev_type == HCI_BREDR &&
+		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+			count++;
+	}
+
+	rp_len = sizeof(*rp) + (2 * count);
+	rp = kmalloc(rp_len, GFP_ATOMIC);
+	if (!rp) {
+		read_unlock(&hci_dev_list_lock);
+		return -ENOMEM;
+	}
+
+	count = 0;
+	list_for_each_entry(d, &hci_dev_list, list) {
+		if (test_bit(HCI_SETUP, &d->dev_flags) ||
+		    test_bit(HCI_CONFIG, &d->dev_flags) ||
+		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+			continue;
+
+		/* Devices marked as raw-only are neither configured
+		 * nor unconfigured controllers.
+		 */
+		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+			continue;
+
+		if (d->dev_type == HCI_BREDR &&
+		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+			rp->index[count++] = cpu_to_le16(d->id);
+			BT_DBG("Added hci%u", d->id);
+		}
+	}
+
+	rp->num_controllers = cpu_to_le16(count);
+	rp_len = sizeof(*rp) + (2 * count);
+
+	read_unlock(&hci_dev_list_lock);
+
+	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
+			   0, rp, rp_len);
+
+	kfree(rp);
+
+	return err;
+}
+
+static bool is_configured(struct hci_dev *hdev)
+{
+	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+		return false;
+
+	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+	    !bacmp(&hdev->public_addr, BDADDR_ANY))
+		return false;
+
+	return true;
+}
+
+static __le32 get_missing_options(struct hci_dev *hdev)
+{
+	u32 options = 0;
+
+	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+		options |= MGMT_OPTION_EXTERNAL_CONFIG;
+
+	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+	    !bacmp(&hdev->public_addr, BDADDR_ANY))
+		options |= MGMT_OPTION_PUBLIC_ADDRESS;
+
+	return cpu_to_le32(options);
+}
+
+static int new_options(struct hci_dev *hdev, struct sock *skip)
+{
+	__le32 options = get_missing_options(hdev);
+
+	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+			  sizeof(options), skip);
+}
+
+static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+	__le32 options = get_missing_options(hdev);
+
+	return cmd_complete(sk, hdev->id, opcode, 0, &options,
+			    sizeof(options));
+}
+
+static int read_config_info(struct sock *sk, struct hci_dev *hdev,
+			    void *data, u16 data_len)
+{
+	struct mgmt_rp_read_config_info rp;
+	u32 options = 0;
+
+	BT_DBG("sock %p %s", sk, hdev->name);
+
+	hci_dev_lock(hdev);
+
+	memset(&rp, 0, sizeof(rp));
+	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
+
+	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+		options |= MGMT_OPTION_EXTERNAL_CONFIG;
+
+	if (hdev->set_bdaddr)
+		options |= MGMT_OPTION_PUBLIC_ADDRESS;
+
+	rp.supported_options = cpu_to_le32(options);
+	rp.missing_options = get_missing_options(hdev);
+
+	hci_dev_unlock(hdev);
+
+	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
+			    sizeof(rp));
+}
+
 static u32 get_supported_settings(struct hci_dev *hdev)
 {
 	u32 settings = 0;
 
 	settings |= MGMT_SETTING_POWERED;
-	settings |= MGMT_SETTING_PAIRABLE;
+	settings |= MGMT_SETTING_BONDABLE;
 	settings |= MGMT_SETTING_DEBUG_KEYS;
+	settings |= MGMT_SETTING_CONNECTABLE;
+	settings |= MGMT_SETTING_DISCOVERABLE;
 
 	if (lmp_bredr_capable(hdev)) {
-		settings |= MGMT_SETTING_CONNECTABLE;
 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
-		settings |= MGMT_SETTING_DISCOVERABLE;
 		settings |= MGMT_SETTING_BREDR;
 		settings |= MGMT_SETTING_LINK_SECURITY;
 
@@ -387,7 +570,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
 		}
 
 		if (lmp_sc_capable(hdev) ||
-		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
 			settings |= MGMT_SETTING_SECURE_CONN;
 	}
 
@@ -397,6 +580,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
 		settings |= MGMT_SETTING_PRIVACY;
 	}
 
+	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+	    hdev->set_bdaddr)
+		settings |= MGMT_SETTING_CONFIGURATION;
+
 	return settings;
 }
 
@@ -416,8 +603,8 @@ static u32 get_current_settings(struct hci_dev *hdev)
 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
 		settings |= MGMT_SETTING_DISCOVERABLE;
 
-	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
-		settings |= MGMT_SETTING_PAIRABLE;
+	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
+		settings |= MGMT_SETTING_BONDABLE;
 
 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
 		settings |= MGMT_SETTING_BREDR;
@@ -440,7 +627,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
 		settings |= MGMT_SETTING_SECURE_CONN;
 
-	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
+	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
 		settings |= MGMT_SETTING_DEBUG_KEYS;
 
 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
@@ -571,6 +758,22 @@ static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
 	return NULL;
 }
 
+static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
+						  struct hci_dev *hdev,
+						  const void *data)
+{
+	struct pending_cmd *cmd;
+
+	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+		if (cmd->user_data != data)
+			continue;
+		if (cmd->opcode == opcode)
+			return cmd;
+	}
+
+	return NULL;
+}
+
 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 {
 	u8 ad_len = 0;
@@ -703,6 +906,16 @@ static void update_adv_data(struct hci_request *req)
 	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
 }
 
+int mgmt_update_adv_data(struct hci_dev *hdev)
+{
+	struct hci_request req;
+
+	hci_req_init(&req, hdev);
+	update_adv_data(&req);
+
+	return hci_req_run(&req, NULL);
+}
+
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
 	u8 *ptr = data;
@@ -836,6 +1049,13 @@ static bool get_connectable(struct hci_dev *hdev)
 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
 }
 
+static void disable_advertising(struct hci_request *req)
+{
+	u8 enable = 0x00;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
 static void enable_advertising(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
@@ -843,12 +1063,18 @@ static void enable_advertising(struct hci_request *req)
 	u8 own_addr_type, enable = 0x01;
 	bool connectable;
 
-	/* Clear the HCI_ADVERTISING bit temporarily so that the
+	if (hci_conn_num(hdev, LE_LINK) > 0)
+		return;
+
+	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+		disable_advertising(req);
+
+	/* Clear the HCI_LE_ADV bit temporarily so that the
 	 * hci_update_random_address knows that it's safe to go ahead
 	 * and write a new random address. The flag will be set back on
 	 * as soon as the SET_ADV_ENABLE HCI command completes.
 	 */
-	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+	clear_bit(HCI_LE_ADV, &hdev->dev_flags);
 
 	connectable = get_connectable(hdev);
 
@@ -860,8 +1086,8 @@ static void enable_advertising(struct hci_request *req)
 		return;
 
 	memset(&cp, 0, sizeof(cp));
-	cp.min_interval = cpu_to_le16(0x0800);
-	cp.max_interval = cpu_to_le16(0x0800);
+	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
+	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
 	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
 	cp.own_address_type = own_addr_type;
 	cp.channel_map = hdev->le_adv_channel_map;
@@ -871,13 +1097,6 @@ static void enable_advertising(struct hci_request *req)
 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 }
 
-static void disable_advertising(struct hci_request *req)
-{
-	u8 enable = 0x00;
-
-	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static void service_cache_off(struct work_struct *work)
 {
 	struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -909,19 +1128,14 @@ static void rpa_expired(struct work_struct *work)
 
 	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
 
-	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
-	    hci_conn_num(hdev, LE_LINK) > 0)
+	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		return;
 
 	/* The generation of a new RPA and programming it into the
 	 * controller happens in the enable_advertising() function.
 	 */
-
 	hci_req_init(&req, hdev);
-
-	disable_advertising(&req);
 	enable_advertising(&req);
-
 	hci_req_run(&req, NULL);
 }
 
@@ -938,7 +1152,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
 	 * for mgmt we require user-space to explicitly enable
 	 * it
 	 */
-	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
 }
 
 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -984,7 +1198,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 {
 	struct pending_cmd *cmd;
 
-	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd)
 		return NULL;
 
@@ -1047,7 +1261,7 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
 	}
 }
 
-static void hci_stop_discovery(struct hci_request *req)
+static bool hci_stop_discovery(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
 	struct hci_cp_remote_name_req_cancel cp;
@@ -1062,32 +1276,39 @@ static void hci_stop_discovery(struct hci_request *req)
 			hci_req_add_le_scan_disable(req);
 		}
 
-		break;
+		return true;
 
 	case DISCOVERY_RESOLVING:
 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
 						     NAME_PENDING);
 		if (!e)
-			return;
+			break;
 
 		bacpy(&cp.bdaddr, &e->data.bdaddr);
 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
 			    &cp);
 
-		break;
+		return true;
 
 	default:
 		/* Passive scanning */
-		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
 			hci_req_add_le_scan_disable(req);
+			return true;
+		}
+
 		break;
 	}
 
+	return false;
 }
 
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
 	struct hci_request req;
 	struct hci_conn *conn;
+	bool discov_stopped;
+	int err;
 
 	hci_req_init(&req, hdev);
 
@@ -1097,10 +1318,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 	}
 
-	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
 		disable_advertising(&req);
 
-	hci_stop_discovery(&req);
+	discov_stopped = hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
 		struct hci_cp_disconnect dc;
@@ -1134,7 +1355,11 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 		}
 	}
 
-	return hci_req_run(&req, clean_up_hci_complete);
+	err = hci_req_run(&req, clean_up_hci_complete);
+	if (!err && discov_stopped)
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+
+	return err;
 }
 
 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1203,36 +1428,6 @@ failed:
 	return err;
 }
 
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
-		      struct sock *skip_sk)
-{
-	struct sk_buff *skb;
-	struct mgmt_hdr *hdr;
-
-	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
-
-	hdr = (void *) skb_put(skb, sizeof(*hdr));
-	hdr->opcode = cpu_to_le16(event);
-	if (hdev)
-		hdr->index = cpu_to_le16(hdev->id);
-	else
-		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
-	hdr->len = cpu_to_le16(data_len);
-
-	if (data)
-		memcpy(skb_put(skb, data_len), data, data_len);
-
-	/* Time stamp */
-	__net_timestamp(skb);
-
-	hci_send_to_control(skb, skip_sk);
-	kfree_skb(skb);
-
-	return 0;
-}
-
 static int new_settings(struct hci_dev *hdev, struct sock *skip)
 {
 	__le32 ev;
@@ -1242,6 +1437,11 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
 }
 
+int mgmt_new_settings(struct hci_dev *hdev)
+{
+	return new_settings(hdev, NULL);
+}
+
 struct cmd_lookup {
 	struct sock *sk;
 	struct hci_dev *hdev;
@@ -1553,7 +1753,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_mode *cp;
-	bool changed;
+	bool conn_changed, discov_changed;
 
 	BT_DBG("status 0x%02x", status);
 
@@ -1570,15 +1770,25 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 	}
 
 	cp = cmd->param;
-	if (cp->val)
-		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-	else
-		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	if (cp->val) {
+		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
+						 &hdev->dev_flags);
+		discov_changed = false;
+	} else {
+		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
+						  &hdev->dev_flags);
+		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
+						    &hdev->dev_flags);
+	}
 
 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
 
-	if (changed)
+	if (conn_changed || discov_changed) {
 		new_settings(hdev, cmd->sk);
+		if (discov_changed)
+			mgmt_update_adv_data(hdev);
+		hci_update_background_scan(hdev);
+	}
 
 remove_cmd:
 	mgmt_pending_remove(cmd);
@@ -1607,8 +1817,10 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
 	if (err < 0)
 		return err;
 
-	if (changed)
+	if (changed) {
+		hci_update_background_scan(hdev);
 		return new_settings(hdev, sk);
+	}
 
 	return 0;
 }
@@ -1669,7 +1881,18 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 	if (cp->val) {
 		scan = SCAN_PAGE;
 	} else {
-		scan = 0;
+		/* If we don't have any whitelist entries just
+		 * disable all scanning. If there are entries
+		 * and we had both page and inquiry scanning
+		 * enabled then fall back to only page scanning.
+		 * Otherwise no changes are needed.
+		 */
+		if (list_empty(&hdev->whitelist))
+			scan = SCAN_DISABLED;
+		else if (test_bit(HCI_ISCAN, &hdev->flags))
+			scan = SCAN_PAGE;
+		else
+			goto no_scan_update;
 
 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
 		    hdev->discov_timeout > 0)
@@ -1679,6 +1902,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 	}
 
+no_scan_update:
 	/* If we're going from non-connectable to connectable or
 	 * vice-versa when fast connectable is enabled ensure that fast
 	 * connectable gets disabled. write_fast_connectable won't do
@@ -1688,11 +1912,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
 		write_fast_connectable(&req, false);
 
-	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
-	    hci_conn_num(hdev, LE_LINK) == 0) {
-		disable_advertising(&req);
+	/* Update the advertising parameters if necessary */
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		enable_advertising(&req);
-	}
 
 	err = hci_req_run(&req, set_connectable_complete);
 	if (err < 0) {
@@ -1708,7 +1930,7 @@ failed:
 	return err;
 }
 
-static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
+static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
 			u16 len)
 {
 	struct mgmt_mode *cp = data;
@@ -1718,17 +1940,17 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
 	BT_DBG("request for %s", hdev->name);
 
 	if (cp->val != 0x00 && cp->val != 0x01)
-		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
+		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
 				  MGMT_STATUS_INVALID_PARAMS);
 
 	hci_dev_lock(hdev);
 
 	if (cp->val)
-		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
 	else
-		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
 
-	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
+	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
 	if (err < 0)
 		goto unlock;
 
@@ -1877,6 +2099,10 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 		goto failed;
 	}
 
+	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+			     sizeof(cp->val), &cp->val);
+
 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
 	if (err < 0) {
 		mgmt_pending_remove(cmd);
@@ -1973,6 +2199,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
 		update_scan_rsp_data(&req);
 		hci_req_run(&req, NULL);
 
+		hci_update_background_scan(hdev);
+
 		hci_dev_unlock(hdev);
 	}
 }
@@ -2048,9 +2276,9 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
 	if (val) {
 		hci_cp.le = val;
-		hci_cp.simul = lmp_le_br_capable(hdev);
+		hci_cp.simul = 0x00;
 	} else {
-		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
 			disable_advertising(&req);
 	}
 
@@ -2373,6 +2601,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 			  u16 len)
 {
 	struct mgmt_cp_load_link_keys *cp = data;
+	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
+				   sizeof(struct mgmt_link_key_info));
 	u16 key_count, expected_len;
 	bool changed;
 	int i;
@@ -2384,6 +2614,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
 	key_count = __le16_to_cpu(cp->key_count);
+	if (key_count > max_key_count) {
+		BT_ERR("load_link_keys: too big key_count value %u",
+		       key_count);
+		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+				  MGMT_STATUS_INVALID_PARAMS);
+	}
 
 	expected_len = sizeof(*cp) + key_count *
 					sizeof(struct mgmt_link_key_info);
@@ -2414,9 +2650,11 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 	hci_link_keys_clear(hdev);
 
 	if (cp->debug_keys)
-		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
+					    &hdev->dev_flags);
 	else
-		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
+					     &hdev->dev_flags);
 
 	if (changed)
 		new_settings(hdev, NULL);
@@ -2424,8 +2662,14 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 	for (i = 0; i < key_count; i++) {
 		struct mgmt_link_key_info *key = &cp->keys[i];
 
-		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
-				 key->type, key->pin_len);
+		/* Always ignore debug keys and require a new pairing if
+		 * the user wants to use them.
+		 */
+		if (key->type == HCI_LK_DEBUG_COMBINATION)
+			continue;
+
+		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
+				 key->type, key->pin_len, NULL);
 	}
 
 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
@@ -2766,6 +3010,10 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	BT_DBG("");
 
+	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
+		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+
 	hci_dev_lock(hdev);
 
 	hdev->io_capability = cp->io_capability;
@@ -2878,6 +3126,11 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 				    MGMT_STATUS_INVALID_PARAMS,
 				    &rp, sizeof(rp));
 
+	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
+		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+				    MGMT_STATUS_INVALID_PARAMS,
+				    &rp, sizeof(rp));
+
 	hci_dev_lock(hdev);
 
 	if (!hdev_is_powered(hdev)) {
@@ -2902,8 +3155,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 		else
 			addr_type = ADDR_LE_DEV_RANDOM;
 
+		/* When pairing a new device, it is expected to remember
+		 * this device for future connections. Adding the connection
+		 * parameter information ahead of time allows tracking
+		 * of the slave preferred values and will speed up any
+		 * further connection establishment.
+		 *
+		 * If connection parameters already exist, then they
+		 * will be kept and this function does nothing.
+		 */
+		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
 		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-				      sec_level, auth_type);
+				      sec_level, HCI_LE_CONN_TIMEOUT,
+				      HCI_ROLE_MASTER);
 	}
 
 	if (IS_ERR(conn)) {
@@ -2948,8 +3213,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	conn->io_capability = cp->io_cap;
 	cmd->user_data = conn;
 
-	if (conn->state == BT_CONNECTED &&
-	    hci_conn_security(conn, sec_level, auth_type))
+	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
+	    hci_conn_security(conn, sec_level, auth_type, true))
 		pairing_complete(cmd, 0);
 
 	err = 0;
@@ -3031,14 +3296,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP. The hdev lock must be
-		 * released as SMP may try to recquire it for crypto
-		 * purposes.
-		 */
-		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
-		hci_dev_lock(hdev);
-
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
 					   MGMT_STATUS_SUCCESS, addr,
@@ -3516,11 +3774,21 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 		goto failed;
 	}
 
-	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
-		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-				 MGMT_STATUS_REJECTED);
-		mgmt_pending_remove(cmd);
-		goto failed;
+	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+		/* Don't let discovery abort an outgoing
+		 * connection attempt that's using directed
+		 * advertising.
+		 */
+		if (hci_conn_hash_lookup_state(hdev, LE_LINK,
+					       BT_CONNECT)) {
+			err = cmd_status(sk, hdev->id,
+					 MGMT_OP_START_DISCOVERY,
+					 MGMT_STATUS_REJECTED);
+			mgmt_pending_remove(cmd);
+			goto failed;
+		}
+
+		disable_advertising(&req);
 	}
 
 	/* If controller is scanning, it means the background scanning
@@ -3723,12 +3991,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
-	if (err < 0)
+	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
+				  cp->addr.type);
+	if (err < 0) {
 		status = MGMT_STATUS_FAILED;
-	else
-		status = MGMT_STATUS_SUCCESS;
+		goto done;
+	}
+
+	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
+		   sk);
+	status = MGMT_STATUS_SUCCESS;
 
+done:
 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
 			   &cp->addr, sizeof(cp->addr));
 
@@ -3753,12 +4027,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
-	if (err < 0)
+	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
+				  cp->addr.type);
+	if (err < 0) {
 		status = MGMT_STATUS_INVALID_PARAMS;
-	else
-		status = MGMT_STATUS_SUCCESS;
+		goto done;
+	}
+
+	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
+		   sk);
+	status = MGMT_STATUS_SUCCESS;
 
+done:
 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
 			   &cp->addr, sizeof(cp->addr));
 
@@ -3813,6 +4093,11 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
 		return;
 	}
 
+	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+	else
+		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
 			     &match);
 
@@ -3853,7 +4138,9 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
 	 * necessary).
 	 */
 	if (!hdev_is_powered(hdev) || val == enabled ||
-	    hci_conn_num(hdev, LE_LINK) > 0) {
+	    hci_conn_num(hdev, LE_LINK) > 0 ||
+	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
 		bool changed = false;
 
 		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
@@ -4105,7 +4392,8 @@ static void set_bredr_scan(struct hci_request *req)
 	 */
 	write_fast_connectable(req, false);
 
-	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+	    !list_empty(&hdev->whitelist))
 		scan |= SCAN_PAGE;
 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
 		scan |= SCAN_INQUIRY;
@@ -4219,7 +4507,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
 	hci_req_init(&req, hdev);
 
-	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+	    !list_empty(&hdev->whitelist))
 		set_bredr_scan(&req);
 
 	/* Since only the advertising data flags will change, there
@@ -4252,7 +4541,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
 				  status);
 
 	if (!lmp_sc_capable(hdev) &&
-	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
@@ -4328,21 +4617,37 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
 			  void *data, u16 len)
 {
 	struct mgmt_mode *cp = data;
-	bool changed;
+	bool changed, use_changed;
 	int err;
 
 	BT_DBG("request for %s", hdev->name);
 
-	if (cp->val != 0x00 && cp->val != 0x01)
+	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
 				  MGMT_STATUS_INVALID_PARAMS);
 
 	hci_dev_lock(hdev);
 
 	if (cp->val)
-		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
+					    &hdev->dev_flags);
+	else
+		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
+					     &hdev->dev_flags);
+
+	if (cp->val == 0x02)
+		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
+						&hdev->dev_flags);
 	else
-		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
+						 &hdev->dev_flags);
+
+	if (hdev_is_powered(hdev) && use_changed &&
+	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
+		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+			     sizeof(mode), &mode);
+	}
 
 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
 	if (err < 0)
@@ -4426,6 +4731,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
 		     u16 len)
 {
 	struct mgmt_cp_load_irks *cp = cp_data;
+	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
+				   sizeof(struct mgmt_irk_info));
 	u16 irk_count, expected_len;
 	int i, err;
 
@@ -4436,6 +4743,11 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
 	irk_count = __le16_to_cpu(cp->irk_count);
+	if (irk_count > max_irk_count) {
+		BT_ERR("load_irks: too big irk_count value %u", irk_count);
+		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+				  MGMT_STATUS_INVALID_PARAMS);
+	}
 
 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
 	if (expected_len != len) {
@@ -4505,6 +4817,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 			       void *cp_data, u16 len)
 {
 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
+	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
+				   sizeof(struct mgmt_ltk_info));
 	u16 key_count, expected_len;
 	int i, err;
 
@@ -4515,6 +4829,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 				  MGMT_STATUS_NOT_SUPPORTED);
 
 	key_count = __le16_to_cpu(cp->key_count);
+	if (key_count > max_key_count) {
+		BT_ERR("load_ltks: too big key_count value %u", key_count);
+		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+				  MGMT_STATUS_INVALID_PARAMS);
+	}
 
 	expected_len = sizeof(*cp) + key_count *
 					sizeof(struct mgmt_ltk_info);
@@ -4550,9 +4869,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 			addr_type = ADDR_LE_DEV_RANDOM;
 
 		if (key->master)
-			type = HCI_SMP_LTK;
+			type = SMP_LTK;
 		else
-			type = HCI_SMP_LTK_SLAVE;
+			type = SMP_LTK_SLAVE;
 
 		switch (key->type) {
 		case MGMT_LTK_UNAUTHENTICATED:
@@ -4790,6 +5109,561 @@ unlock:
4790 return err; 5109 return err;
4791} 5110}
4792 5111
5112static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5113{
5114 struct mgmt_cp_get_clock_info *cp;
5115 struct mgmt_rp_get_clock_info rp;
5116 struct hci_cp_read_clock *hci_cp;
5117 struct pending_cmd *cmd;
5118 struct hci_conn *conn;
5119
5120 BT_DBG("%s status %u", hdev->name, status);
5121
5122 hci_dev_lock(hdev);
5123
5124 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5125 if (!hci_cp)
5126 goto unlock;
5127
5128 if (hci_cp->which) {
5129 u16 handle = __le16_to_cpu(hci_cp->handle);
5130 conn = hci_conn_hash_lookup_handle(hdev, handle);
5131 } else {
5132 conn = NULL;
5133 }
5134
5135 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5136 if (!cmd)
5137 goto unlock;
5138
5139 cp = cmd->param;
5140
5141 memset(&rp, 0, sizeof(rp));
5142 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5143
5144 if (status)
5145 goto send_rsp;
5146
5147 rp.local_clock = cpu_to_le32(hdev->clock);
5148
5149 if (conn) {
5150 rp.piconet_clock = cpu_to_le32(conn->clock);
5151 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5152 }
5153
5154send_rsp:
5155 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5156 &rp, sizeof(rp));
5157 mgmt_pending_remove(cmd);
5158 if (conn)
5159 hci_conn_drop(conn);
5160
5161unlock:
5162 hci_dev_unlock(hdev);
5163}
5164
5165static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5166 u16 len)
5167{
5168 struct mgmt_cp_get_clock_info *cp = data;
5169 struct mgmt_rp_get_clock_info rp;
5170 struct hci_cp_read_clock hci_cp;
5171 struct pending_cmd *cmd;
5172 struct hci_request req;
5173 struct hci_conn *conn;
5174 int err;
5175
5176 BT_DBG("%s", hdev->name);
5177
5178 memset(&rp, 0, sizeof(rp));
5179 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5180 rp.addr.type = cp->addr.type;
5181
5182 if (cp->addr.type != BDADDR_BREDR)
5183 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5184 MGMT_STATUS_INVALID_PARAMS,
5185 &rp, sizeof(rp));
5186
5187 hci_dev_lock(hdev);
5188
5189 if (!hdev_is_powered(hdev)) {
5190 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5191 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5192 goto unlock;
5193 }
5194
5195 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5196 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5197 &cp->addr.bdaddr);
5198 if (!conn || conn->state != BT_CONNECTED) {
5199 err = cmd_complete(sk, hdev->id,
5200 MGMT_OP_GET_CLOCK_INFO,
5201 MGMT_STATUS_NOT_CONNECTED,
5202 &rp, sizeof(rp));
5203 goto unlock;
5204 }
5205 } else {
5206 conn = NULL;
5207 }
5208
5209 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5210 if (!cmd) {
5211 err = -ENOMEM;
5212 goto unlock;
5213 }
5214
5215 hci_req_init(&req, hdev);
5216
5217 memset(&hci_cp, 0, sizeof(hci_cp));
5218 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5219
5220 if (conn) {
5221 hci_conn_hold(conn);
5222 cmd->user_data = conn;
5223
5224 hci_cp.handle = cpu_to_le16(conn->handle);
5225 hci_cp.which = 0x01; /* Piconet clock */
5226 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5227 }
5228
5229 err = hci_req_run(&req, get_clock_info_complete);
5230 if (err < 0)
5231 mgmt_pending_remove(cmd);
5232
5233unlock:
5234 hci_dev_unlock(hdev);
5235 return err;
5236}
5237
5238/* Helper for Add/Remove Device commands */
5239static void update_page_scan(struct hci_dev *hdev, u8 scan)
5240{
5241 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5242 return;
5243
5244 if (!hdev_is_powered(hdev))
5245 return;
5246
5247 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5248 * make any changes to page scanning.
5249 */
5250 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5251 return;
5252
5253 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5254 scan |= SCAN_INQUIRY;
5255
5256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5257}
5258
5259static void device_added(struct sock *sk, struct hci_dev *hdev,
5260 bdaddr_t *bdaddr, u8 type, u8 action)
5261{
5262 struct mgmt_ev_device_added ev;
5263
5264 bacpy(&ev.addr.bdaddr, bdaddr);
5265 ev.addr.type = type;
5266 ev.action = action;
5267
5268 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5269}
5270
5271static int add_device(struct sock *sk, struct hci_dev *hdev,
5272 void *data, u16 len)
5273{
5274 struct mgmt_cp_add_device *cp = data;
5275 u8 auto_conn, addr_type;
5276 int err;
5277
5278 BT_DBG("%s", hdev->name);
5279
5280 if (!bdaddr_type_is_valid(cp->addr.type) ||
5281 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5282 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5285
5286 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5287 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5288 MGMT_STATUS_INVALID_PARAMS,
5289 &cp->addr, sizeof(cp->addr));
5290
5291 hci_dev_lock(hdev);
5292
5293 if (cp->addr.type == BDADDR_BREDR) {
5294 bool update_scan;
5295
5296 /* Only incoming connections action is supported for now */
5297 if (cp->action != 0x01) {
5298 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5299 MGMT_STATUS_INVALID_PARAMS,
5300 &cp->addr, sizeof(cp->addr));
5301 goto unlock;
5302 }
5303
5304 update_scan = list_empty(&hdev->whitelist);
5305
5306 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5307 cp->addr.type);
5308 if (err)
5309 goto unlock;
5310
5311 if (update_scan)
5312 update_page_scan(hdev, SCAN_PAGE);
5313
5314 goto added;
5315 }
5316
5317 if (cp->addr.type == BDADDR_LE_PUBLIC)
5318 addr_type = ADDR_LE_DEV_PUBLIC;
5319 else
5320 addr_type = ADDR_LE_DEV_RANDOM;
5321
5322 if (cp->action == 0x02)
5323 auto_conn = HCI_AUTO_CONN_ALWAYS;
5324 else if (cp->action == 0x01)
5325 auto_conn = HCI_AUTO_CONN_DIRECT;
5326 else
5327 auto_conn = HCI_AUTO_CONN_REPORT;
5328
5329 /* If the connection parameters don't exist for this device,
5330 * they will be created and configured with defaults.
5331 */
5332 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5333 auto_conn) < 0) {
5334 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5335 MGMT_STATUS_FAILED,
5336 &cp->addr, sizeof(cp->addr));
5337 goto unlock;
5338 }
5339
5340added:
5341 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5342
5343 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5344 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5345
5346unlock:
5347 hci_dev_unlock(hdev);
5348 return err;
5349}
5350
5351static void device_removed(struct sock *sk, struct hci_dev *hdev,
5352 bdaddr_t *bdaddr, u8 type)
5353{
5354 struct mgmt_ev_device_removed ev;
5355
5356 bacpy(&ev.addr.bdaddr, bdaddr);
5357 ev.addr.type = type;
5358
5359 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5360}
5361
5362static int remove_device(struct sock *sk, struct hci_dev *hdev,
5363 void *data, u16 len)
5364{
5365 struct mgmt_cp_remove_device *cp = data;
5366 int err;
5367
5368 BT_DBG("%s", hdev->name);
5369
5370 hci_dev_lock(hdev);
5371
5372 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5373 struct hci_conn_params *params;
5374 u8 addr_type;
5375
5376 if (!bdaddr_type_is_valid(cp->addr.type)) {
5377 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5378 MGMT_STATUS_INVALID_PARAMS,
5379 &cp->addr, sizeof(cp->addr));
5380 goto unlock;
5381 }
5382
5383 if (cp->addr.type == BDADDR_BREDR) {
5384 err = hci_bdaddr_list_del(&hdev->whitelist,
5385 &cp->addr.bdaddr,
5386 cp->addr.type);
5387 if (err) {
5388 err = cmd_complete(sk, hdev->id,
5389 MGMT_OP_REMOVE_DEVICE,
5390 MGMT_STATUS_INVALID_PARAMS,
5391 &cp->addr, sizeof(cp->addr));
5392 goto unlock;
5393 }
5394
5395 if (list_empty(&hdev->whitelist))
5396 update_page_scan(hdev, SCAN_DISABLED);
5397
5398 device_removed(sk, hdev, &cp->addr.bdaddr,
5399 cp->addr.type);
5400 goto complete;
5401 }
5402
5403 if (cp->addr.type == BDADDR_LE_PUBLIC)
5404 addr_type = ADDR_LE_DEV_PUBLIC;
5405 else
5406 addr_type = ADDR_LE_DEV_RANDOM;
5407
5408 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5409 addr_type);
5410 if (!params) {
5411 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5412 MGMT_STATUS_INVALID_PARAMS,
5413 &cp->addr, sizeof(cp->addr));
5414 goto unlock;
5415 }
5416
5417 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5418 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5419 MGMT_STATUS_INVALID_PARAMS,
5420 &cp->addr, sizeof(cp->addr));
5421 goto unlock;
5422 }
5423
5424 list_del(&params->action);
5425 list_del(&params->list);
5426 kfree(params);
5427 hci_update_background_scan(hdev);
5428
5429 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5430 } else {
5431 struct hci_conn_params *p, *tmp;
5432 struct bdaddr_list *b, *btmp;
5433
5434 if (cp->addr.type) {
5435 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5436 MGMT_STATUS_INVALID_PARAMS,
5437 &cp->addr, sizeof(cp->addr));
5438 goto unlock;
5439 }
5440
5441 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5442 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5443 list_del(&b->list);
5444 kfree(b);
5445 }
5446
5447 update_page_scan(hdev, SCAN_DISABLED);
5448
5449 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5450 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5451 continue;
5452 device_removed(sk, hdev, &p->addr, p->addr_type);
5453 list_del(&p->action);
5454 list_del(&p->list);
5455 kfree(p);
5456 }
5457
5458 BT_DBG("All LE connection parameters were removed");
5459
5460 hci_update_background_scan(hdev);
5461 }
5462
5463complete:
5464 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5465 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5466
5467unlock:
5468 hci_dev_unlock(hdev);
5469 return err;
5470}
5471
5472static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5473 u16 len)
5474{
5475 struct mgmt_cp_load_conn_param *cp = data;
5476 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5477 sizeof(struct mgmt_conn_param));
5478 u16 param_count, expected_len;
5479 int i;
5480
5481 if (!lmp_le_capable(hdev))
5482 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5483 MGMT_STATUS_NOT_SUPPORTED);
5484
5485 param_count = __le16_to_cpu(cp->param_count);
5486 if (param_count > max_param_count) {
5487 BT_ERR("load_conn_param: too big param_count value %u",
5488 param_count);
5489 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5490 MGMT_STATUS_INVALID_PARAMS);
5491 }
5492
5493 expected_len = sizeof(*cp) + param_count *
5494 sizeof(struct mgmt_conn_param);
5495 if (expected_len != len) {
5496 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5497 expected_len, len);
5498 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5499 MGMT_STATUS_INVALID_PARAMS);
5500 }
5501
5502 BT_DBG("%s param_count %u", hdev->name, param_count);
5503
5504 hci_dev_lock(hdev);
5505
5506 hci_conn_params_clear_disabled(hdev);
5507
5508 for (i = 0; i < param_count; i++) {
5509 struct mgmt_conn_param *param = &cp->params[i];
5510 struct hci_conn_params *hci_param;
5511 u16 min, max, latency, timeout;
5512 u8 addr_type;
5513
5514 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5515 param->addr.type);
5516
5517 if (param->addr.type == BDADDR_LE_PUBLIC) {
5518 addr_type = ADDR_LE_DEV_PUBLIC;
5519 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5520 addr_type = ADDR_LE_DEV_RANDOM;
5521 } else {
5522 BT_ERR("Ignoring invalid connection parameters");
5523 continue;
5524 }
5525
5526 min = le16_to_cpu(param->min_interval);
5527 max = le16_to_cpu(param->max_interval);
5528 latency = le16_to_cpu(param->latency);
5529 timeout = le16_to_cpu(param->timeout);
5530
5531 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5532 min, max, latency, timeout);
5533
5534 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5535 BT_ERR("Ignoring invalid connection parameters");
5536 continue;
5537 }
5538
5539 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5540 addr_type);
5541 if (!hci_param) {
5542 BT_ERR("Failed to add connection parameters");
5543 continue;
5544 }
5545
5546 hci_param->conn_min_interval = min;
5547 hci_param->conn_max_interval = max;
5548 hci_param->conn_latency = latency;
5549 hci_param->supervision_timeout = timeout;
5550 }
5551
5552 hci_dev_unlock(hdev);
5553
5554 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5555}
5556
5557static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5558 void *data, u16 len)
5559{
5560 struct mgmt_cp_set_external_config *cp = data;
5561 bool changed;
5562 int err;
5563
5564 BT_DBG("%s", hdev->name);
5565
5566 if (hdev_is_powered(hdev))
5567 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5568 MGMT_STATUS_REJECTED);
5569
5570 if (cp->config != 0x00 && cp->config != 0x01)
5571 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5572 MGMT_STATUS_INVALID_PARAMS);
5573
5574 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5575 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5576 MGMT_STATUS_NOT_SUPPORTED);
5577
5578 hci_dev_lock(hdev);
5579
5580 if (cp->config)
5581 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5582 &hdev->dev_flags);
5583 else
5584 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5585 &hdev->dev_flags);
5586
5587 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5588 if (err < 0)
5589 goto unlock;
5590
5591 if (!changed)
5592 goto unlock;
5593
5594 err = new_options(hdev, sk);
5595
5596 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5597 mgmt_index_removed(hdev);
5598
5599 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5600 set_bit(HCI_CONFIG, &hdev->dev_flags);
5601 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5602
5603 queue_work(hdev->req_workqueue, &hdev->power_on);
5604 } else {
5605 set_bit(HCI_RAW, &hdev->flags);
5606 mgmt_index_added(hdev);
5607 }
5608 }
5609
5610unlock:
5611 hci_dev_unlock(hdev);
5612 return err;
5613}
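
The NOT_SUPPORTED check above means Set External Config is only honoured for transports that opt in via a quirk. A hypothetical driver-side sketch of that opt-in:

/* Hypothetical driver setup: a controller whose configuration (for
 * example firmware or a static address) is provided by user space
 * advertises it with HCI_QUIRK_EXTERNAL_CONFIG before registration.
 */
hdev = hci_alloc_dev();
if (!hdev)
	return -ENOMEM;

set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
/* ... then fill in hdev->open, hdev->close, hdev->send and call
 * hci_register_dev() as usual.
 */
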
5614
5615static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5616 void *data, u16 len)
5617{
5618 struct mgmt_cp_set_public_address *cp = data;
5619 bool changed;
5620 int err;
5621
5622 BT_DBG("%s", hdev->name);
5623
5624 if (hdev_is_powered(hdev))
5625 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5626 MGMT_STATUS_REJECTED);
5627
5628 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5629 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5630 MGMT_STATUS_INVALID_PARAMS);
5631
5632 if (!hdev->set_bdaddr)
5633 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5634 MGMT_STATUS_NOT_SUPPORTED);
5635
5636 hci_dev_lock(hdev);
5637
5638 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5639 bacpy(&hdev->public_addr, &cp->bdaddr);
5640
5641 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5642 if (err < 0)
5643 goto unlock;
5644
5645 if (!changed)
5646 goto unlock;
5647
5648 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5649 err = new_options(hdev, sk);
5650
5651 if (is_configured(hdev)) {
5652 mgmt_index_removed(hdev);
5653
5654 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5655
5656 set_bit(HCI_CONFIG, &hdev->dev_flags);
5657 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5658
5659 queue_work(hdev->req_workqueue, &hdev->power_on);
5660 }
5661
5662unlock:
5663 hci_dev_unlock(hdev);
5664 return err;
5665}
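
Set Public Address likewise depends on driver support: without a set_bdaddr hook it fails with NOT_SUPPORTED. A sketch of such a hook, with a made-up vendor opcode standing in for whatever command the hardware actually uses:

/* Hypothetical driver hook matching the hdev->set_bdaddr check above.
 * 0xfc06 is a placeholder vendor opcode, not taken from any real device.
 */
static int example_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc06, sizeof(*bdaddr), bdaddr,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);
	return 0;
}

/* Registered from the driver's setup path:
 * hdev->set_bdaddr = example_set_bdaddr;
 */
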
5666
4793static const struct mgmt_handler { 5667static const struct mgmt_handler {
4794 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 5668 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4795 u16 data_len); 5669 u16 data_len);
@@ -4805,7 +5679,7 @@ static const struct mgmt_handler {
4805 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, 5679 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4806 { set_connectable, false, MGMT_SETTING_SIZE }, 5680 { set_connectable, false, MGMT_SETTING_SIZE },
4807 { set_fast_connectable, false, MGMT_SETTING_SIZE }, 5681 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4808 { set_pairable, false, MGMT_SETTING_SIZE }, 5682 { set_bondable, false, MGMT_SETTING_SIZE },
4809 { set_link_security, false, MGMT_SETTING_SIZE }, 5683 { set_link_security, false, MGMT_SETTING_SIZE },
4810 { set_ssp, false, MGMT_SETTING_SIZE }, 5684 { set_ssp, false, MGMT_SETTING_SIZE },
4811 { set_hs, false, MGMT_SETTING_SIZE }, 5685 { set_hs, false, MGMT_SETTING_SIZE },
@@ -4846,9 +5720,16 @@ static const struct mgmt_handler {
4846 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 5720 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4847 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 5721 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4848 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE }, 5722 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5723 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5724 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5725 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5726 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5727 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5728 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5729 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5730 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
4849}; 5731};
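
For context, mgmt_control() consumes this table by indexing it directly with the opcode; the middle column marks variable-length commands whose payload may exceed the listed minimum size. A sketch paraphrasing that dispatch path (field names assumed from the struct mgmt_handler definition above):

/* Sketch of the dispatch in mgmt_control(): look up the handler by
 * opcode and enforce the declared payload size, where var_len handlers
 * only require a minimum length.
 */
handler = &mgmt_handlers[opcode];

if ((handler->var_len && len < handler->data_len) ||
    (!handler->var_len && len != handler->data_len)) {
	err = cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_PARAMS);
	goto done;
}

err = handler->func(sk, hdev, cp, len);
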
4850 5732
4851
4852int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 5733int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4853{ 5734{
4854 void *buf; 5735 void *buf;
@@ -4892,11 +5773,21 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4892 } 5773 }
4893 5774
4894 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 5775 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5776 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
4895 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 5777 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4896 err = cmd_status(sk, index, opcode, 5778 err = cmd_status(sk, index, opcode,
4897 MGMT_STATUS_INVALID_INDEX); 5779 MGMT_STATUS_INVALID_INDEX);
4898 goto done; 5780 goto done;
4899 } 5781 }
5782
5783 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5784 opcode != MGMT_OP_READ_CONFIG_INFO &&
5785 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5786 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5787 err = cmd_status(sk, index, opcode,
5788 MGMT_STATUS_INVALID_INDEX);
5789 goto done;
5790 }
4900 } 5791 }
4901 5792
4902 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 5793 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -4907,8 +5798,15 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4907 goto done; 5798 goto done;
4908 } 5799 }
4909 5800
4910 if ((hdev && opcode < MGMT_OP_READ_INFO) || 5801 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
4911 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 5802 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5803 err = cmd_status(sk, index, opcode,
5804 MGMT_STATUS_INVALID_INDEX);
5805 goto done;
5806 }
5807
5808 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5809 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
4912 err = cmd_status(sk, index, opcode, 5810 err = cmd_status(sk, index, opcode,
4913 MGMT_STATUS_INVALID_INDEX); 5811 MGMT_STATUS_INVALID_INDEX);
4914 goto done; 5812 goto done;
@@ -4947,7 +5845,13 @@ void mgmt_index_added(struct hci_dev *hdev)
4947 if (hdev->dev_type != HCI_BREDR) 5845 if (hdev->dev_type != HCI_BREDR)
4948 return; 5846 return;
4949 5847
4950 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 5848 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5849 return;
5850
5851 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5852 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5853 else
5854 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4951} 5855}
4952 5856
4953void mgmt_index_removed(struct hci_dev *hdev) 5857void mgmt_index_removed(struct hci_dev *hdev)
@@ -4957,20 +5861,42 @@ void mgmt_index_removed(struct hci_dev *hdev)
4957 if (hdev->dev_type != HCI_BREDR) 5861 if (hdev->dev_type != HCI_BREDR)
4958 return; 5862 return;
4959 5863
5864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5865 return;
5866
4960 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 5867 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4961 5868
4962 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 5869 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5870 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5871 else
5872 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4963} 5873}
4964 5874
4965/* This function requires the caller holds hdev->lock */ 5875/* This function requires the caller holds hdev->lock */
4966static void restart_le_auto_conns(struct hci_dev *hdev) 5876static void restart_le_actions(struct hci_dev *hdev)
4967{ 5877{
4968 struct hci_conn_params *p; 5878 struct hci_conn_params *p;
4969 5879
4970 list_for_each_entry(p, &hdev->le_conn_params, list) { 5880 list_for_each_entry(p, &hdev->le_conn_params, list) {
4971 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) 5881 /* Needed for AUTO_OFF case where it might not "really"
4972 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type); 5882 * have been powered off.
5883 */
5884 list_del_init(&p->action);
5885
5886 switch (p->auto_connect) {
5887 case HCI_AUTO_CONN_DIRECT:
5888 case HCI_AUTO_CONN_ALWAYS:
5889 list_add(&p->action, &hdev->pend_le_conns);
5890 break;
5891 case HCI_AUTO_CONN_REPORT:
5892 list_add(&p->action, &hdev->pend_le_reports);
5893 break;
5894 default:
5895 break;
5896 }
4973 } 5897 }
5898
5899 hci_update_background_scan(hdev);
4974} 5900}
4975 5901
4976static void powered_complete(struct hci_dev *hdev, u8 status) 5902static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -4981,7 +5907,7 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
4981 5907
4982 hci_dev_lock(hdev); 5908 hci_dev_lock(hdev);
4983 5909
4984 restart_le_auto_conns(hdev); 5910 restart_le_actions(hdev);
4985 5911
4986 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 5912 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4987 5913
@@ -5011,8 +5937,8 @@ static int powered_update_hci(struct hci_dev *hdev)
5011 lmp_bredr_capable(hdev)) { 5937 lmp_bredr_capable(hdev)) {
5012 struct hci_cp_write_le_host_supported cp; 5938 struct hci_cp_write_le_host_supported cp;
5013 5939
5014 cp.le = 1; 5940 cp.le = 0x01;
5015 cp.simul = lmp_le_br_capable(hdev); 5941 cp.simul = 0x00;
5016 5942
5017 /* Check first if we already have the right 5943 /* Check first if we already have the right
5018 * host state (host features set) 5944 * host state (host features set)
@@ -5138,92 +6064,6 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev)
5138 hci_dev_unlock(hdev); 6064 hci_dev_unlock(hdev);
5139} 6065}
5140 6066
5141void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5142{
5143 bool changed;
5144
5145 /* Nothing needed here if there's a pending command since that
5146 * commands request completion callback takes care of everything
5147 * necessary.
5148 */
5149 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5150 return;
5151
5152 /* Powering off may clear the scan mode - don't let that interfere */
5153 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5154 return;
5155
5156 if (discoverable) {
5157 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5158 } else {
5159 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5160 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5161 }
5162
5163 if (changed) {
5164 struct hci_request req;
5165
5166 /* In case this change in discoverable was triggered by
5167 * a disabling of connectable there could be a need to
5168 * update the advertising flags.
5169 */
5170 hci_req_init(&req, hdev);
5171 update_adv_data(&req);
5172 hci_req_run(&req, NULL);
5173
5174 new_settings(hdev, NULL);
5175 }
5176}
5177
5178void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5179{
5180 bool changed;
5181
5182 /* Nothing needed here if there's a pending command since that
5183 * commands request completion callback takes care of everything
5184 * necessary.
5185 */
5186 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5187 return;
5188
5189 /* Powering off may clear the scan mode - don't let that interfere */
5190 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 return;
5192
5193 if (connectable)
5194 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5195 else
5196 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5197
5198 if (changed)
5199 new_settings(hdev, NULL);
5200}
5201
5202void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5203{
5204 /* Powering off may stop advertising - don't let that interfere */
5205 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 return;
5207
5208 if (advertising)
5209 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5210 else
5211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5212}
5213
5214void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5215{
5216 u8 mgmt_err = mgmt_status(status);
5217
5218 if (scan & SCAN_PAGE)
5219 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5220 cmd_status_rsp, &mgmt_err);
5221
5222 if (scan & SCAN_INQUIRY)
5223 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5224 cmd_status_rsp, &mgmt_err);
5225}
5226
5227void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 6067void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5228 bool persistent) 6068 bool persistent)
5229{ 6069{
@@ -5279,7 +6119,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5279 ev.key.ediv = key->ediv; 6119 ev.key.ediv = key->ediv;
5280 ev.key.rand = key->rand; 6120 ev.key.rand = key->rand;
5281 6121
5282 if (key->type == HCI_SMP_LTK) 6122 if (key->type == SMP_LTK)
5283 ev.key.master = 1; 6123 ev.key.master = 1;
5284 6124
5285 memcpy(ev.key.val, key->val, sizeof(key->val)); 6125 memcpy(ev.key.val, key->val, sizeof(key->val));
@@ -5347,6 +6187,27 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5347 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); 6187 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5348} 6188}
5349 6189
6190void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6191 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6192 u16 max_interval, u16 latency, u16 timeout)
6193{
6194 struct mgmt_ev_new_conn_param ev;
6195
6196 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6197 return;
6198
6199 memset(&ev, 0, sizeof(ev));
6200 bacpy(&ev.addr.bdaddr, bdaddr);
6201 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6202 ev.store_hint = store_hint;
6203 ev.min_interval = cpu_to_le16(min_interval);
6204 ev.max_interval = cpu_to_le16(max_interval);
6205 ev.latency = cpu_to_le16(latency);
6206 ev.timeout = cpu_to_le16(timeout);
6207
6208 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6209}
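
On the receiving side, a management client that tracks connection parameters would act on this event roughly as follows; a hypothetical sketch using the same BlueZ headers as the earlier user-space example, with store_params() standing in for whatever persistent storage the client uses:

/* store_params() is a hypothetical storage helper, not a BlueZ API. */
extern void store_params(const bdaddr_t *bdaddr, uint8_t type,
			 uint16_t min, uint16_t max,
			 uint16_t latency, uint16_t timeout);

static void handle_new_conn_param(const struct mgmt_hdr *hdr,
				  const void *payload)
{
	const struct mgmt_ev_new_conn_param *ev = payload;

	if (btohs(hdr->opcode) != MGMT_EV_NEW_CONN_PARAM)
		return;

	/* Only persist what the kernel asked us to keep. */
	if (!ev->store_hint)
		return;

	store_params(&ev->addr.bdaddr, ev->addr.type,
		     btohs(ev->min_interval), btohs(ev->max_interval),
		     btohs(ev->latency), btohs(ev->timeout));
}
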
6210
5350static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, 6211static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5351 u8 data_len) 6212 u8 data_len)
5352{ 6213{
@@ -5765,10 +6626,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5765 6626
5766 hci_req_init(&req, hdev); 6627 hci_req_init(&req, hdev);
5767 6628
5768 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 6629 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6630 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6631 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6632 sizeof(enable), &enable);
5769 update_eir(&req); 6633 update_eir(&req);
5770 else 6634 } else {
5771 clear_eir(&req); 6635 clear_eir(&req);
6636 }
5772 6637
5773 hci_req_run(&req, NULL); 6638 hci_req_run(&req, NULL);
5774} 6639}
@@ -5912,17 +6777,23 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5912} 6777}
5913 6778
5914void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 6779void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5915 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, 6780 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
5916 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp, 6781 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
5917 u8 scan_rsp_len)
5918{ 6782{
5919 char buf[512]; 6783 char buf[512];
5920 struct mgmt_ev_device_found *ev = (void *) buf; 6784 struct mgmt_ev_device_found *ev = (void *) buf;
5921 struct smp_irk *irk;
5922 size_t ev_size; 6785 size_t ev_size;
5923 6786
5924 if (!hci_discovery_active(hdev)) 6787 /* Don't send events for a non-kernel initiated discovery. With
5925 return; 6788 * LE one exception is if we have pend_le_reports > 0 in which
6789 * case we're doing passive scanning and want these events.
6790 */
6791 if (!hci_discovery_active(hdev)) {
6792 if (link_type == ACL_LINK)
6793 return;
6794 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6795 return;
6796 }
5926 6797
5927 /* Make sure that the buffer is big enough. The 5 extra bytes 6798 /* Make sure that the buffer is big enough. The 5 extra bytes
5928 * are for the potential CoD field. 6799 * are for the potential CoD field.
@@ -5932,20 +6803,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5932 6803
5933 memset(buf, 0, sizeof(buf)); 6804 memset(buf, 0, sizeof(buf));
5934 6805
5935 irk = hci_get_irk(hdev, bdaddr, addr_type); 6806 bacpy(&ev->addr.bdaddr, bdaddr);
5936 if (irk) { 6807 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5937 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5938 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5939 } else {
5940 bacpy(&ev->addr.bdaddr, bdaddr);
5941 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5942 }
5943
5944 ev->rssi = rssi; 6808 ev->rssi = rssi;
5945 if (cfm_name) 6809 ev->flags = cpu_to_le32(flags);
5946 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5947 if (!ssp)
5948 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5949 6810
5950 if (eir_len > 0) 6811 if (eir_len > 0)
5951 memcpy(ev->eir, eir, eir_len); 6812 memcpy(ev->eir, eir, eir_len);
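
With this change the confirm-name and legacy-pairing hints are no longer separate arguments; callers fold them into the flags word instead. A paraphrased caller-side sketch (name_unknown and ssp_supported are illustrative locals, not identifiers from this patch):

/* Caller-side sketch of the new signature: the flags word carries what
 * the old cfm_name/ssp booleans used to convey.
 */
u32 flags = 0;

if (name_unknown)
	flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
if (!ssp_supported)
	flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

mgmt_device_found(hdev, bdaddr, ACL_LINK, BDADDR_BREDR, dev_class, rssi,
		  flags, eir, eir_len, NULL, 0);
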
@@ -6013,63 +6874,19 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6013 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); 6874 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6014} 6875}
6015 6876
6016int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6017{
6018 struct pending_cmd *cmd;
6019 struct mgmt_ev_device_blocked ev;
6020
6021 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6022
6023 bacpy(&ev.addr.bdaddr, bdaddr);
6024 ev.addr.type = type;
6025
6026 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6027 cmd ? cmd->sk : NULL);
6028}
6029
6030int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6031{
6032 struct pending_cmd *cmd;
6033 struct mgmt_ev_device_unblocked ev;
6034
6035 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6036
6037 bacpy(&ev.addr.bdaddr, bdaddr);
6038 ev.addr.type = type;
6039
6040 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6041 cmd ? cmd->sk : NULL);
6042}
6043
6044static void adv_enable_complete(struct hci_dev *hdev, u8 status) 6877static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6045{ 6878{
6046 BT_DBG("%s status %u", hdev->name, status); 6879 BT_DBG("%s status %u", hdev->name, status);
6047
6048 /* Clear the advertising mgmt setting if we failed to re-enable it */
6049 if (status) {
6050 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6051 new_settings(hdev, NULL);
6052 }
6053} 6880}
6054 6881
6055void mgmt_reenable_advertising(struct hci_dev *hdev) 6882void mgmt_reenable_advertising(struct hci_dev *hdev)
6056{ 6883{
6057 struct hci_request req; 6884 struct hci_request req;
6058 6885
6059 if (hci_conn_num(hdev, LE_LINK) > 0)
6060 return;
6061
6062 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 6886 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6063 return; 6887 return;
6064 6888
6065 hci_req_init(&req, hdev); 6889 hci_req_init(&req, hdev);
6066 enable_advertising(&req); 6890 enable_advertising(&req);
6067 6891 hci_req_run(&req, adv_enable_complete);
6068 /* If this fails we have no option but to let user space know
6069 * that we've disabled advertising.
6070 */
6071 if (hci_req_run(&req, adv_enable_complete) < 0) {
6072 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6073 new_settings(hdev, NULL);
6074 }
6075} 6892}