Diffstat (limited to 'net/bluetooth/hci_request.c')
-rw-r--r--   net/bluetooth/hci_request.c | 1778 ++++++++++++++++++++++++++++++++++++++----
1 file changed, 1695 insertions(+), 83 deletions(-)
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 981f8a202c27..41b5f3813f02 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -21,12 +21,19 @@
    SOFTWARE IS DISCLAIMED.
 */

+#include <asm/unaligned.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>

 #include "smp.h"
 #include "hci_request.h"

+#define HCI_REQ_DONE      0
+#define HCI_REQ_PEND      1
+#define HCI_REQ_CANCELED  2
+
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
 {
        skb_queue_head_init(&req->cmd_q);
@@ -56,8 +63,12 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
-       bt_cb(skb)->hci.req_complete = complete;
-       bt_cb(skb)->hci.req_complete_skb = complete_skb;
+       if (complete) {
+               bt_cb(skb)->hci.req_complete = complete;
+       } else if (complete_skb) {
+               bt_cb(skb)->hci.req_complete_skb = complete_skb;
+               bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
+       }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
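
Annotation: req_run() attaches the completion callback only to the last command in
the queue; intermediate commands complete silently. A minimal usage sketch of the
request API declared in hci_request.h (illustrative only, not part of this patch;
example_complete/example_run are hypothetical names):

static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);
}

static int example_run(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        /* The callback lands on the last queued command's skb */
        return hci_req_run(&req, example_complete);
}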
@@ -78,6 +89,203 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
        return req_run(req, NULL, complete);
 }

+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+                                 struct sk_buff *skb)
+{
+       BT_DBG("%s result 0x%2.2x", hdev->name, result);
+
+       if (hdev->req_status == HCI_REQ_PEND) {
+               hdev->req_result = result;
+               hdev->req_status = HCI_REQ_DONE;
+               if (skb)
+                       hdev->req_skb = skb_get(skb);
+               wake_up_interruptible(&hdev->req_wait_q);
+       }
+}
+
+void hci_req_sync_cancel(struct hci_dev *hdev, int err)
+{
+       BT_DBG("%s err 0x%2.2x", hdev->name, err);
+
+       if (hdev->req_status == HCI_REQ_PEND) {
+               hdev->req_result = err;
+               hdev->req_status = HCI_REQ_CANCELED;
+               wake_up_interruptible(&hdev->req_wait_q);
+       }
+}
+
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+                                 const void *param, u8 event, u32 timeout)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       struct hci_request req;
+       struct sk_buff *skb;
+       int err = 0;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_req_init(&req, hdev);
+
+       hci_req_add_ev(&req, opcode, plen, param, event);
+
+       hdev->req_status = HCI_REQ_PEND;
+
+       add_wait_queue(&hdev->req_wait_q, &wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
+       if (err < 0) {
+               remove_wait_queue(&hdev->req_wait_q, &wait);
+               set_current_state(TASK_RUNNING);
+               return ERR_PTR(err);
+       }
+
+       schedule_timeout(timeout);
+
+       remove_wait_queue(&hdev->req_wait_q, &wait);
+
+       if (signal_pending(current))
+               return ERR_PTR(-EINTR);
+
+       switch (hdev->req_status) {
+       case HCI_REQ_DONE:
+               err = -bt_to_errno(hdev->req_result);
+               break;
+
+       case HCI_REQ_CANCELED:
+               err = -hdev->req_result;
+               break;
+
+       default:
+               err = -ETIMEDOUT;
+               break;
+       }
+
+       hdev->req_status = hdev->req_result = 0;
+       skb = hdev->req_skb;
+       hdev->req_skb = NULL;
+
+       BT_DBG("%s end: err %d", hdev->name, err);
+
+       if (err < 0) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
+       }
+
+       if (!skb)
+               return ERR_PTR(-ENODATA);
+
+       return skb;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+                              const void *param, u32 timeout)
+{
+       return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
+
+/* Execute request and wait for completion. */
+int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
+                                                    unsigned long opt),
+                  unsigned long opt, u32 timeout, u8 *hci_status)
+{
+       struct hci_request req;
+       DECLARE_WAITQUEUE(wait, current);
+       int err = 0;
+
+       BT_DBG("%s start", hdev->name);
+
+       hci_req_init(&req, hdev);
+
+       hdev->req_status = HCI_REQ_PEND;
+
+       err = func(&req, opt);
+       if (err) {
+               if (hci_status)
+                       *hci_status = HCI_ERROR_UNSPECIFIED;
+               return err;
+       }
+
+       add_wait_queue(&hdev->req_wait_q, &wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+
+       err = hci_req_run_skb(&req, hci_req_sync_complete);
+       if (err < 0) {
+               hdev->req_status = 0;
+
+               remove_wait_queue(&hdev->req_wait_q, &wait);
+               set_current_state(TASK_RUNNING);
+
+               /* ENODATA means the HCI request command queue is empty.
+                * This can happen when a request with conditionals doesn't
+                * trigger any commands to be sent. This is normal behavior
+                * and should not trigger an error return.
+                */
+               if (err == -ENODATA) {
+                       if (hci_status)
+                               *hci_status = 0;
+                       return 0;
+               }
+
+               if (hci_status)
+                       *hci_status = HCI_ERROR_UNSPECIFIED;
+
+               return err;
+       }
+
+       schedule_timeout(timeout);
+
+       remove_wait_queue(&hdev->req_wait_q, &wait);
+
+       if (signal_pending(current))
+               return -EINTR;
+
+       switch (hdev->req_status) {
+       case HCI_REQ_DONE:
+               err = -bt_to_errno(hdev->req_result);
+               if (hci_status)
+                       *hci_status = hdev->req_result;
+               break;
+
+       case HCI_REQ_CANCELED:
+               err = -hdev->req_result;
+               if (hci_status)
+                       *hci_status = HCI_ERROR_UNSPECIFIED;
+               break;
+
+       default:
+               err = -ETIMEDOUT;
+               if (hci_status)
+                       *hci_status = HCI_ERROR_UNSPECIFIED;
+               break;
+       }
+
+       hdev->req_status = hdev->req_result = 0;
+
+       BT_DBG("%s end: err %d", hdev->name, err);
+
+       return err;
+}
+
+int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+                                                 unsigned long opt),
+                unsigned long opt, u32 timeout, u8 *hci_status)
+{
+       int ret;
+
+       if (!test_bit(HCI_UP, &hdev->flags))
+               return -ENETDOWN;
+
+       /* Serialize all requests */
+       hci_req_sync_lock(hdev);
+       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+       hci_req_sync_unlock(hdev);
+
+       return ret;
+}
+
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
 {
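
Annotation: the block above is the synchronous request machinery. Two illustrative
call patterns (a sketch only, not from this patch; example_sync_users is a
hypothetical name, error handling trimmed):

static int example_sync_users(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        /* Send one command and block for its completion event */
        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        kfree_skb(skb);

        /* Run a whole request built by a callback; hci_req_sync()
         * serializes against other sync requests and fails with
         * -ENETDOWN if the device is not up. le_scan_disable() is one
         * of the builders defined later in this patch.
         */
        return hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, NULL);
}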
@@ -98,8 +306,8 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,

        BT_DBG("skb len %d", skb->len);

-       bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-       bt_cb(skb)->hci.opcode = opcode;
+       hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+       hci_skb_opcode(skb) = opcode;

        return skb;
 }
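
Annotation: for reference, the new accessors are thin wrappers over the skb
control block. Shown here as an assumption about the bluetooth.h of this era,
not as part of the patch:

/* #define hci_skb_pkt_type(skb)  bt_cb((skb))->pkt_type
 * #define hci_skb_opcode(skb)    bt_cb((skb))->hci.opcode
 */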
@@ -128,7 +336,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
        }

        if (skb_queue_empty(&req->cmd_q))
-               bt_cb(skb)->hci.req_start = true;
+               bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

@@ -141,6 +349,311 @@ void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
        hci_req_add_ev(req, opcode, plen, param, 0);
 }

+void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_write_page_scan_activity acp;
+       u8 type;
+
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+               return;
+
+       if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+               return;
+
+       if (enable) {
+               type = PAGE_SCAN_TYPE_INTERLACED;
+
+               /* 160 msec page scan interval */
+               acp.interval = cpu_to_le16(0x0100);
+       } else {
+               type = PAGE_SCAN_TYPE_STANDARD; /* default */
+
+               /* default 1.28 sec page scan */
+               acp.interval = cpu_to_le16(0x0800);
+       }
+
+       acp.window = cpu_to_le16(0x0012);
+
+       if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
+           __cpu_to_le16(hdev->page_scan_window) != acp.window)
+               hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+                           sizeof(acp), &acp);
+
+       if (hdev->page_scan_type != type)
+               hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
+}
+
+/* This function controls the background scanning based on hdev->pend_le_conns
+ * list. If there are pending LE connections we start the background scanning,
+ * otherwise we stop it.
+ *
+ * This function requires the caller holds hdev->lock.
+ */
+static void __hci_update_background_scan(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       if (!test_bit(HCI_UP, &hdev->flags) ||
+           test_bit(HCI_INIT, &hdev->flags) ||
+           hci_dev_test_flag(hdev, HCI_SETUP) ||
+           hci_dev_test_flag(hdev, HCI_CONFIG) ||
+           hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+           hci_dev_test_flag(hdev, HCI_UNREGISTER))
+               return;
+
+       /* No point in doing scanning if LE support hasn't been enabled */
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+               return;
+
+       /* If discovery is active don't interfere with it */
+       if (hdev->discovery.state != DISCOVERY_STOPPED)
+               return;
+
+       /* Reset RSSI and UUID filters when starting background scanning
+        * since these filters are meant for service discovery only.
+        *
+        * The Start Discovery and Start Service Discovery operations
+        * ensure to set proper values for RSSI threshold and UUID
+        * filter list. So it is safe to just reset them here.
+        */
+       hci_discovery_filter_clear(hdev);
+
+       if (list_empty(&hdev->pend_le_conns) &&
+           list_empty(&hdev->pend_le_reports)) {
+               /* If there are no pending LE connections or devices
+                * to be scanned for, we should stop the background
+                * scanning.
+                */
+
+               /* If controller is not scanning we are done. */
+               if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+                       return;
+
+               hci_req_add_le_scan_disable(req);
+
+               BT_DBG("%s stopping background scanning", hdev->name);
+       } else {
+               /* If there is at least one pending LE connection, we should
+                * keep the background scan running.
+                */
+
+               /* If controller is connecting, we should not start scanning
+                * since some controllers are not able to scan and connect at
+                * the same time.
+                */
+               if (hci_lookup_le_connect(hdev))
+                       return;
+
+               /* If controller is currently scanning, we stop it to ensure we
+                * don't miss any advertising (due to duplicates filter).
+                */
+               if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+                       hci_req_add_le_scan_disable(req);
+
+               hci_req_add_le_passive_scan(req);
+
+               BT_DBG("%s starting background scanning", hdev->name);
+       }
+}
+
+void __hci_req_update_name(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_write_local_name cp;
+
+       memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+
+       hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
+}
+
+#define PNP_INFO_SVCLASS_ID            0x1200
+
+static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 4)
+               return ptr;
+
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               u16 uuid16;
+
+               if (uuid->size != 16)
+                       continue;
+
+               uuid16 = get_unaligned_le16(&uuid->uuid[12]);
+               if (uuid16 < 0x1100)
+                       continue;
+
+               if (uuid16 == PNP_INFO_SVCLASS_ID)
+                       continue;
+
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID16_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + sizeof(u16) > len) {
+                       uuids_start[1] = EIR_UUID16_SOME;
+                       break;
+               }
+
+               *ptr++ = (uuid16 & 0x00ff);
+               *ptr++ = (uuid16 & 0xff00) >> 8;
+               uuids_start[0] += sizeof(uuid16);
+       }
+
+       return ptr;
+}
+
+static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 6)
+               return ptr;
+
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               if (uuid->size != 32)
+                       continue;
+
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID32_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + sizeof(u32) > len) {
+                       uuids_start[1] = EIR_UUID32_SOME;
+                       break;
+               }
+
+               memcpy(ptr, &uuid->uuid[12], sizeof(u32));
+               ptr += sizeof(u32);
+               uuids_start[0] += sizeof(u32);
+       }
+
+       return ptr;
+}
+
+static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+       u8 *ptr = data, *uuids_start = NULL;
+       struct bt_uuid *uuid;
+
+       if (len < 18)
+               return ptr;
+
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               if (uuid->size != 128)
+                       continue;
+
+               if (!uuids_start) {
+                       uuids_start = ptr;
+                       uuids_start[0] = 1;
+                       uuids_start[1] = EIR_UUID128_ALL;
+                       ptr += 2;
+               }
+
+               /* Stop if not enough space to put next UUID */
+               if ((ptr - data) + 16 > len) {
+                       uuids_start[1] = EIR_UUID128_SOME;
+                       break;
+               }
+
+               memcpy(ptr, uuid->uuid, 16);
+               ptr += 16;
+               uuids_start[0] += 16;
+       }
+
+       return ptr;
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+       u8 *ptr = data;
+       size_t name_len;
+
+       name_len = strlen(hdev->dev_name);
+
+       if (name_len > 0) {
+               /* EIR Data type */
+               if (name_len > 48) {
+                       name_len = 48;
+                       ptr[1] = EIR_NAME_SHORT;
+               } else
+                       ptr[1] = EIR_NAME_COMPLETE;
+
+               /* EIR Data length */
+               ptr[0] = name_len + 1;
+
+               memcpy(ptr + 2, hdev->dev_name, name_len);
+
+               ptr += (name_len + 2);
+       }
+
+       if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
+               ptr[0] = 2;
+               ptr[1] = EIR_TX_POWER;
+               ptr[2] = (u8) hdev->inq_tx_power;
+
+               ptr += 3;
+       }
+
+       if (hdev->devid_source > 0) {
+               ptr[0] = 9;
+               ptr[1] = EIR_DEVICE_ID;
+
+               put_unaligned_le16(hdev->devid_source, ptr + 2);
+               put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+               put_unaligned_le16(hdev->devid_product, ptr + 6);
+               put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+               ptr += 10;
+       }
+
+       ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+       ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+       ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+}
+
+void __hci_req_update_eir(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_write_eir cp;
+
+       if (!hdev_is_powered(hdev))
+               return;
+
+       if (!lmp_ext_inq_capable(hdev))
+               return;
+
+       if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+               return;
+
+       if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+
+       create_eir(hdev, cp.data);
+
+       if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+               return;
+
+       memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+       hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
 void hci_req_add_le_scan_disable(struct hci_request *req)
 {
        struct hci_cp_le_set_scan_enable cp;
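
Annotation: create_eir() emits standard EIR structures, each a length byte
(covering the type byte plus data), a type byte, then the data. A worked
example (illustrative, not from this patch) for a device named "kernel":

/* 0x07                length = 1 (type byte) + 6 (name bytes)
 * 0x09                EIR_NAME_COMPLETE
 * 'k' 'e' 'r' 'n' 'e' 'l'
 *
 * Names longer than 48 bytes are truncated and tagged EIR_NAME_SHORT
 * instead, as in the name_len > 48 branch above.
 */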
@@ -302,6 +815,483 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
                    &enable_cp);
 }

+static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
+{
+       u8 instance = hdev->cur_adv_instance;
+       struct adv_info *adv_instance;
+
+       /* Ignore instance 0 */
+       if (instance == 0x00)
+               return 0;
+
+       adv_instance = hci_find_adv_instance(hdev, instance);
+       if (!adv_instance)
+               return 0;
+
+       /* TODO: Take into account the "appearance" and "local-name" flags here.
+        * These are currently being ignored as they are not supported.
+        */
+       return adv_instance->scan_rsp_len;
+}
+
+void __hci_req_disable_advertising(struct hci_request *req)
+{
+       u8 enable = 0x00;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
+{
+       u32 flags;
+       struct adv_info *adv_instance;
+
+       if (instance == 0x00) {
+               /* Instance 0 always manages the "Tx Power" and "Flags"
+                * fields
+                */
+               flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
+
+               /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
+                * corresponds to the "connectable" instance flag.
+                */
+               if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
+                       flags |= MGMT_ADV_FLAG_CONNECTABLE;
+
+               return flags;
+       }
+
+       adv_instance = hci_find_adv_instance(hdev, instance);
+
+       /* Return 0 when we got an invalid instance identifier. */
+       if (!adv_instance)
+               return 0;
+
+       return adv_instance->flags;
+}
+
+void __hci_req_enable_advertising(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_param cp;
+       u8 own_addr_type, enable = 0x01;
+       bool connectable;
+       u32 flags;
+
+       if (hci_conn_num(hdev, LE_LINK) > 0)
+               return;
+
+       if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+               __hci_req_disable_advertising(req);
+
+       /* Clear the HCI_LE_ADV bit temporarily so that the
+        * hci_update_random_address knows that it's safe to go ahead
+        * and write a new random address. The flag will be set back on
+        * as soon as the SET_ADV_ENABLE HCI command completes.
+        */
+       hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+       flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+
+       /* If the "connectable" instance flag was not set, then choose between
+        * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+        */
+       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+                     mgmt_get_connectable(hdev);
+
+       /* Set require_privacy to true only when non-connectable
+        * advertising is used. In that case it is fine to use a
+        * non-resolvable private address.
+        */
+       if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
+       cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
+
+       if (connectable)
+               cp.type = LE_ADV_IND;
+       else if (get_cur_adv_instance_scan_rsp_len(hdev))
+               cp.type = LE_ADV_SCAN_IND;
+       else
+               cp.type = LE_ADV_NONCONN_IND;
+
+       cp.own_address_type = own_addr_type;
+       cp.channel_map = hdev->le_adv_channel_map;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+       u8 ad_len = 0;
+       size_t name_len;
+
+       name_len = strlen(hdev->dev_name);
+       if (name_len > 0) {
+               size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+               if (name_len > max_len) {
+                       name_len = max_len;
+                       ptr[1] = EIR_NAME_SHORT;
+               } else
+                       ptr[1] = EIR_NAME_COMPLETE;
+
+               ptr[0] = name_len + 1;
+
+               memcpy(ptr + 2, hdev->dev_name, name_len);
+
+               ad_len += (name_len + 2);
+               ptr += (name_len + 2);
+       }
+
+       return ad_len;
+}
+
+static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
+                                       u8 *ptr)
+{
+       struct adv_info *adv_instance;
+
+       adv_instance = hci_find_adv_instance(hdev, instance);
+       if (!adv_instance)
+               return 0;
+
+       /* TODO: Set the appropriate entries based on advertising instance flags
+        * here once flags other than 0 are supported.
+        */
+       memcpy(ptr, adv_instance->scan_rsp_data,
+              adv_instance->scan_rsp_len);
+
+       return adv_instance->scan_rsp_len;
+}
+
+void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_scan_rsp_data cp;
+       u8 len;
+
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+
+       if (instance)
+               len = create_instance_scan_rsp_data(hdev, instance, cp.data);
+       else
+               len = create_default_scan_rsp_data(hdev, cp.data);
+
+       if (hdev->scan_rsp_data_len == len &&
+           !memcmp(cp.data, hdev->scan_rsp_data, len))
+               return;
+
+       memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+       hdev->scan_rsp_data_len = len;
+
+       cp.length = len;
+
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+{
+       struct adv_info *adv_instance = NULL;
+       u8 ad_len = 0, flags = 0;
+       u32 instance_flags;
+
+       /* Return 0 when the current instance identifier is invalid. */
+       if (instance) {
+               adv_instance = hci_find_adv_instance(hdev, instance);
+               if (!adv_instance)
+                       return 0;
+       }
+
+       instance_flags = get_adv_instance_flags(hdev, instance);
+
+       /* The Add Advertising command allows userspace to set both the general
+        * and limited discoverable flags.
+        */
+       if (instance_flags & MGMT_ADV_FLAG_DISCOV)
+               flags |= LE_AD_GENERAL;
+
+       if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
+               flags |= LE_AD_LIMITED;
+
+       if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
+               /* If a discovery flag wasn't provided, simply use the global
+                * settings.
+                */
+               if (!flags)
+                       flags |= mgmt_get_adv_discov_flags(hdev);
+
+               if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+                       flags |= LE_AD_NO_BREDR;
+
+               /* If flags would still be empty, then there is no need to
+                * include the "Flags" AD field.
+                */
+               if (flags) {
+                       ptr[0] = 0x02;
+                       ptr[1] = EIR_FLAGS;
+                       ptr[2] = flags;
+
+                       ad_len += 3;
+                       ptr += 3;
+               }
+       }
+
+       if (adv_instance) {
+               memcpy(ptr, adv_instance->adv_data,
+                      adv_instance->adv_data_len);
+               ad_len += adv_instance->adv_data_len;
+               ptr += adv_instance->adv_data_len;
+       }
+
+       /* Provide Tx Power only if we can provide a valid value for it */
+       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
+           (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
+               ptr[0] = 0x02;
+               ptr[1] = EIR_TX_POWER;
+               ptr[2] = (u8)hdev->adv_tx_power;
+
+               ad_len += 3;
+               ptr += 3;
+       }
+
+       return ad_len;
+}
+
+void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_data cp;
+       u8 len;
+
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+
+       len = create_instance_adv_data(hdev, instance, cp.data);
+
+       /* There's nothing to do if the data hasn't changed */
+       if (hdev->adv_data_len == len &&
+           memcmp(cp.data, hdev->adv_data, len) == 0)
+               return;
+
+       memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+       hdev->adv_data_len = len;
+
+       cp.length = len;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
+int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
+{
+       struct hci_request req;
+
+       hci_req_init(&req, hdev);
+       __hci_req_update_adv_data(&req, instance);
+
+       return hci_req_run(&req, NULL);
+}
+
+static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+       BT_DBG("%s status %u", hdev->name, status);
+}
+
+void hci_req_reenable_advertising(struct hci_dev *hdev)
+{
+       struct hci_request req;
+
+       if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+           list_empty(&hdev->adv_instances))
+               return;
+
+       hci_req_init(&req, hdev);
+
+       if (hdev->cur_adv_instance) {
+               __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
+                                               true);
+       } else {
+               __hci_req_update_adv_data(&req, 0x00);
+               __hci_req_update_scan_rsp_data(&req, 0x00);
+               __hci_req_enable_advertising(&req);
+       }
+
+       hci_req_run(&req, adv_enable_complete);
+}
+
+static void adv_timeout_expire(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           adv_instance_expire.work);
+
+       struct hci_request req;
+       u8 instance;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       hdev->adv_instance_timeout = 0;
+
+       instance = hdev->cur_adv_instance;
+       if (instance == 0x00)
+               goto unlock;
+
+       hci_req_init(&req, hdev);
+
+       hci_req_clear_adv_instance(hdev, &req, instance, false);
+
+       if (list_empty(&hdev->adv_instances))
+               __hci_req_disable_advertising(&req);
+
+       hci_req_run(&req, NULL);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
+                                   bool force)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct adv_info *adv_instance = NULL;
+       u16 timeout;
+
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+           list_empty(&hdev->adv_instances))
+               return -EPERM;
+
+       if (hdev->adv_instance_timeout)
+               return -EBUSY;
+
+       adv_instance = hci_find_adv_instance(hdev, instance);
+       if (!adv_instance)
+               return -ENOENT;
+
+       /* A zero timeout means unlimited advertising. As long as there is
+        * only one instance, duration should be ignored. We still set a timeout
+        * in case further instances are being added later on.
+        *
+        * If the remaining lifetime of the instance is more than the duration
+        * then the timeout corresponds to the duration, otherwise it will be
+        * reduced to the remaining instance lifetime.
+        */
+       if (adv_instance->timeout == 0 ||
+           adv_instance->duration <= adv_instance->remaining_time)
+               timeout = adv_instance->duration;
+       else
+               timeout = adv_instance->remaining_time;
+
+       /* The remaining time is being reduced unless the instance is being
+        * advertised without time limit.
+        */
+       if (adv_instance->timeout)
+               adv_instance->remaining_time =
+                               adv_instance->remaining_time - timeout;
+
+       hdev->adv_instance_timeout = timeout;
+       queue_delayed_work(hdev->req_workqueue,
+                          &hdev->adv_instance_expire,
+                          msecs_to_jiffies(timeout * 1000));
+
+       /* If we're just re-scheduling the same instance again then do not
+        * execute any HCI commands. This happens when a single instance is
+        * being advertised.
+        */
+       if (!force && hdev->cur_adv_instance == instance &&
+           hci_dev_test_flag(hdev, HCI_LE_ADV))
+               return 0;
+
+       hdev->cur_adv_instance = instance;
+       __hci_req_update_adv_data(req, instance);
+       __hci_req_update_scan_rsp_data(req, instance);
+       __hci_req_enable_advertising(req);
+
+       return 0;
+}
+
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+       if (hdev->adv_instance_timeout) {
+               hdev->adv_instance_timeout = 0;
+               cancel_delayed_work(&hdev->adv_instance_expire);
+       }
+}
+
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ *   lifetime is not zero.
+ * - force == false: The instance will be deactivated but kept stored unless
+ *   the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ *   setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
+                               u8 instance, bool force)
+{
+       struct adv_info *adv_instance, *n, *next_instance = NULL;
+       int err;
+       u8 rem_inst;
+
+       /* Cancel any timeout concerning the removed instance(s). */
+       if (!instance || hdev->cur_adv_instance == instance)
+               cancel_adv_timeout(hdev);
+
+       /* Get the next instance to advertise BEFORE we remove
+        * the current one. This can be the same instance again
+        * if there is only one instance.
+        */
+       if (instance && hdev->cur_adv_instance == instance)
+               next_instance = hci_get_next_instance(hdev, instance);
+
+       if (instance == 0x00) {
+               list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
+                                        list) {
+                       if (!(force || adv_instance->timeout))
+                               continue;
+
+                       rem_inst = adv_instance->instance;
+                       err = hci_remove_adv_instance(hdev, rem_inst);
+                       if (!err)
+                               mgmt_advertising_removed(NULL, hdev, rem_inst);
+               }
+       } else {
+               adv_instance = hci_find_adv_instance(hdev, instance);
+
+               if (force || (adv_instance && adv_instance->timeout &&
+                             !adv_instance->remaining_time)) {
+                       /* Don't advertise a removed instance. */
+                       if (next_instance &&
+                           next_instance->instance == instance)
+                               next_instance = NULL;
+
+                       err = hci_remove_adv_instance(hdev, instance);
+                       if (!err)
+                               mgmt_advertising_removed(NULL, hdev, instance);
+               }
+       }
+
+       if (!req || !hdev_is_powered(hdev) ||
+           hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               return;
+
+       if (next_instance)
+               __hci_req_schedule_adv_instance(req, next_instance->instance,
+                                               false);
+}
+
 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
 {
        struct hci_dev *hdev = req->hdev;
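
Annotation: a worked example (illustrative, not from the patch) of the timeout
selection in __hci_req_schedule_adv_instance():

/* An instance added with duration 10 s and timeout 30 s: on each schedule
 * duration (10) <= remaining_time, so timeout = 10 and remaining_time
 * steps 30 -> 20 -> 10 -> 0. Once remaining_time hits 0, the next
 * adv_timeout_expire() lets hci_req_clear_adv_instance() drop the
 * instance even with force == false.
 */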
@@ -432,7 +1422,7 @@ static bool disconnected_whitelist_entries(struct hci_dev *hdev)
        return false;
 }

-void __hci_update_page_scan(struct hci_request *req)
+void __hci_req_update_scan(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
        u8 scan;
@@ -452,117 +1442,168 @@ void __hci_update_page_scan(struct hci_request *req)
        else
                scan = SCAN_DISABLED;

-       if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
-               return;
-
        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

+       if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
+           test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
+               return;
+
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 }

-void hci_update_page_scan(struct hci_dev *hdev)
+static int update_scan(struct hci_request *req, unsigned long opt)
 {
-       struct hci_request req;
+       hci_dev_lock(req->hdev);
+       __hci_req_update_scan(req);
+       hci_dev_unlock(req->hdev);
+       return 0;
+}

-       hci_req_init(&req, hdev);
-       __hci_update_page_scan(&req);
-       hci_req_run(&req, NULL);
+static void scan_update_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
+
+       hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
 }

-/* This function controls the background scanning based on hdev->pend_le_conns
- * list. If there are pending LE connection we start the background scanning,
- * otherwise we stop it.
- *
- * This function requires the caller holds hdev->lock.
- */
-void __hci_update_background_scan(struct hci_request *req)
+static int connectable_update(struct hci_request *req, unsigned long opt)
 {
        struct hci_dev *hdev = req->hdev;

-       if (!test_bit(HCI_UP, &hdev->flags) ||
-           test_bit(HCI_INIT, &hdev->flags) ||
-           hci_dev_test_flag(hdev, HCI_SETUP) ||
-           hci_dev_test_flag(hdev, HCI_CONFIG) ||
-           hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
-           hci_dev_test_flag(hdev, HCI_UNREGISTER))
+       hci_dev_lock(hdev);
+
+       __hci_req_update_scan(req);
+
+       /* If BR/EDR is not enabled and we disable advertising as a
+        * by-product of disabling connectable, we need to update the
+        * advertising flags.
+        */
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+               __hci_req_update_adv_data(req, hdev->cur_adv_instance);
+
+       /* Update the advertising parameters if necessary */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+           !list_empty(&hdev->adv_instances))
+               __hci_req_enable_advertising(req);
+
+       __hci_update_background_scan(req);
+
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static void connectable_update_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           connectable_update);
+       u8 status;
+
+       hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
+       mgmt_set_connectable_complete(hdev, status);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+       struct bt_uuid *uuid;
+       u8 val = 0;
+
+       list_for_each_entry(uuid, &hdev->uuids, list)
+               val |= uuid->svc_hint;
+
+       return val;
+}
+
+void __hci_req_update_class(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 cod[3];
+
+       BT_DBG("%s", hdev->name);
+
+       if (!hdev_is_powered(hdev))
                return;

-       /* No point in doing scanning if LE support hasn't been enabled */
-       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+       if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

-       /* If discovery is active don't interfere with it */
-       if (hdev->discovery.state != DISCOVERY_STOPPED)
+       if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

-       /* Reset RSSI and UUID filters when starting background scanning
-        * since these filters are meant for service discovery only.
-        *
-        * The Start Discovery and Start Service Discovery operations
-        * ensure to set proper values for RSSI threshold and UUID
-        * filter list. So it is safe to just reset them here.
-        */
-       hci_discovery_filter_clear(hdev);
+       cod[0] = hdev->minor_class;
+       cod[1] = hdev->major_class;
+       cod[2] = get_service_classes(hdev);

-       if (list_empty(&hdev->pend_le_conns) &&
-           list_empty(&hdev->pend_le_reports)) {
-               /* If there is no pending LE connections or devices
-                * to be scanned for, we should stop the background
-                * scanning.
-                */
+       if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
+               cod[1] |= 0x20;

-               /* If controller is not scanning we are done. */
-               if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
-                       return;
+       if (memcmp(cod, hdev->dev_class, 3) == 0)
+               return;

-               hci_req_add_le_scan_disable(req);
+       hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}

-               BT_DBG("%s stopping background scanning", hdev->name);
+static void write_iac(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_write_current_iac_lap cp;
+
+       if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+               return;
+
+       if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+               /* Limited discoverable mode */
+               cp.num_iac = min_t(u8, hdev->num_iac, 2);
+               cp.iac_lap[0] = 0x00;   /* LIAC */
+               cp.iac_lap[1] = 0x8b;
+               cp.iac_lap[2] = 0x9e;
+               cp.iac_lap[3] = 0x33;   /* GIAC */
+               cp.iac_lap[4] = 0x8b;
+               cp.iac_lap[5] = 0x9e;
        } else {
-               /* If there is at least one pending LE connection, we should
-                * keep the background scan running.
-                */
-
-               /* If controller is connecting, we should not start scanning
-                * since some controllers are not able to scan and connect at
-                * the same time.
-                */
-               if (hci_lookup_le_connect(hdev))
-                       return;
-
-               /* If controller is currently scanning, we stop it to ensure we
-                * don't miss any advertising (due to duplicates filter).
-                */
-               if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
-                       hci_req_add_le_scan_disable(req);
-
-               hci_req_add_le_passive_scan(req);
-
-               BT_DBG("%s starting background scanning", hdev->name);
+               /* General discoverable mode */
+               cp.num_iac = 1;
+               cp.iac_lap[0] = 0x33;   /* GIAC */
+               cp.iac_lap[1] = 0x8b;
+               cp.iac_lap[2] = 0x9e;
+       }
+
+       hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+                   (cp.num_iac * 3) + 1, &cp);
+}
+
+static int discoverable_update(struct hci_request *req, unsigned long opt)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       hci_dev_lock(hdev);
+
+       if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+               write_iac(req);
+               __hci_req_update_scan(req);
+               __hci_req_update_class(req);
        }
-}

-static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
-                                           u16 opcode)
-{
-       if (status)
-               BT_DBG("HCI request failed to update background scanning: "
-                      "status 0x%2.2x", status);
-}
+       /* Advertising instances don't use the global discoverable setting, so
+        * only update AD if advertising was enabled using Set Advertising.
+        */
+       if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+               __hci_req_update_adv_data(req, 0x00);

-void hci_update_background_scan(struct hci_dev *hdev)
-{
-       int err;
-       struct hci_request req;
+       hci_dev_unlock(hdev);

-       hci_req_init(&req, hdev);
+       return 0;
+}

-       __hci_update_background_scan(&req);
+static void discoverable_update_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           discoverable_update);
+       u8 status;

-       err = hci_req_run(&req, update_background_scan_complete);
-       if (err && err != -ENODATA)
-               BT_ERR("Failed to run HCI request: err %d", err);
+       hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
+       mgmt_set_discoverable_complete(hdev, status);
 }

 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
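
Annotation: the hunk above establishes the pattern this patch repeats: each
update path pairs a request builder (int (*)(struct hci_request *, unsigned
long)) with a work item that funnels it through hci_req_sync(). A hypothetical
new update would follow the same shape (foo_update/foo_update_work are
illustrative names only):

static int foo_update(struct hci_request *req, unsigned long opt)
{
        /* queue HCI commands on req here */
        return 0;
}

static void foo_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            foo_update);

        hci_req_sync(hdev, foo_update, 0, HCI_CMD_TIMEOUT, NULL);
}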
@@ -657,3 +1698,574 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason) | |||
657 | 1698 | ||
658 | return 0; | 1699 | return 0; |
659 | } | 1700 | } |
1701 | |||
1702 | static int update_bg_scan(struct hci_request *req, unsigned long opt) | ||
1703 | { | ||
1704 | hci_dev_lock(req->hdev); | ||
1705 | __hci_update_background_scan(req); | ||
1706 | hci_dev_unlock(req->hdev); | ||
1707 | return 0; | ||
1708 | } | ||
1709 | |||
1710 | static void bg_scan_update(struct work_struct *work) | ||
1711 | { | ||
1712 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
1713 | bg_scan_update); | ||
1714 | struct hci_conn *conn; | ||
1715 | u8 status; | ||
1716 | int err; | ||
1717 | |||
1718 | err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); | ||
1719 | if (!err) | ||
1720 | return; | ||
1721 | |||
1722 | hci_dev_lock(hdev); | ||
1723 | |||
1724 | conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); | ||
1725 | if (conn) | ||
1726 | hci_le_conn_failed(conn, status); | ||
1727 | |||
1728 | hci_dev_unlock(hdev); | ||
1729 | } | ||
1730 | |||
1731 | static int le_scan_disable(struct hci_request *req, unsigned long opt) | ||
1732 | { | ||
1733 | hci_req_add_le_scan_disable(req); | ||
1734 | return 0; | ||
1735 | } | ||
1736 | |||
1737 | static int bredr_inquiry(struct hci_request *req, unsigned long opt) | ||
1738 | { | ||
1739 | u8 length = opt; | ||
1740 | const u8 giac[3] = { 0x33, 0x8b, 0x9e }; | ||
1741 | const u8 liac[3] = { 0x00, 0x8b, 0x9e }; | ||
1742 | struct hci_cp_inquiry cp; | ||
1743 | |||
1744 | BT_DBG("%s", req->hdev->name); | ||
1745 | |||
1746 | hci_dev_lock(req->hdev); | ||
1747 | hci_inquiry_cache_flush(req->hdev); | ||
1748 | hci_dev_unlock(req->hdev); | ||
1749 | |||
1750 | memset(&cp, 0, sizeof(cp)); | ||
1751 | |||
1752 | if (req->hdev->discovery.limited) | ||
1753 | memcpy(&cp.lap, liac, sizeof(cp.lap)); | ||
1754 | else | ||
1755 | memcpy(&cp.lap, giac, sizeof(cp.lap)); | ||
1756 | |||
1757 | cp.length = length; | ||
1758 | |||
1759 | hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); | ||
1760 | |||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | static void le_scan_disable_work(struct work_struct *work) | ||
1765 | { | ||
1766 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
1767 | le_scan_disable.work); | ||
1768 | u8 status; | ||
1769 | |||
1770 | BT_DBG("%s", hdev->name); | ||
1771 | |||
1772 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | ||
1773 | return; | ||
1774 | |||
1775 | cancel_delayed_work(&hdev->le_scan_restart); | ||
1776 | |||
1777 | hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); | ||
1778 | if (status) { | ||
1779 | BT_ERR("Failed to disable LE scan: status 0x%02x", status); | ||
1780 | return; | ||
1781 | } | ||
1782 | |||
1783 | hdev->discovery.scan_start = 0; | ||
1784 | |||
1785 | /* If we were running LE only scan, change discovery state. If | ||
1786 | * we were running both LE and BR/EDR inquiry simultaneously, | ||
1787 | * and BR/EDR inquiry is already finished, stop discovery, | ||
1788 | * otherwise BR/EDR inquiry will stop discovery when finished. | ||
1789 | * If we will resolve remote device name, do not change | ||
1790 | * discovery state. | ||
1791 | */ | ||
1792 | |||
1793 | if (hdev->discovery.type == DISCOV_TYPE_LE) | ||
1794 | goto discov_stopped; | ||
1795 | |||
1796 | if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) | ||
1797 | return; | ||
1798 | |||
1799 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { | ||
1800 | if (!test_bit(HCI_INQUIRY, &hdev->flags) && | ||
1801 | hdev->discovery.state != DISCOVERY_RESOLVING) | ||
1802 | goto discov_stopped; | ||
1803 | |||
1804 | return; | ||
1805 | } | ||
1806 | |||
1807 | hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, | ||
1808 | HCI_CMD_TIMEOUT, &status); | ||
1809 | if (status) { | ||
1810 | BT_ERR("Inquiry failed: status 0x%02x", status); | ||
1811 | goto discov_stopped; | ||
1812 | } | ||
1813 | |||
1814 | return; | ||
1815 | |||
1816 | discov_stopped: | ||
1817 | hci_dev_lock(hdev); | ||
1818 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | ||
1819 | hci_dev_unlock(hdev); | ||
1820 | } | ||
1821 | |||
1822 | static int le_scan_restart(struct hci_request *req, unsigned long opt) | ||
1823 | { | ||
1824 | struct hci_dev *hdev = req->hdev; | ||
1825 | struct hci_cp_le_set_scan_enable cp; | ||
1826 | |||
1827 | /* If controller is not scanning we are done. */ | ||
1828 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | ||
1829 | return 0; | ||
1830 | |||
1831 | hci_req_add_le_scan_disable(req); | ||
1832 | |||
1833 | memset(&cp, 0, sizeof(cp)); | ||
1834 | cp.enable = LE_SCAN_ENABLE; | ||
1835 | cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | ||
1836 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | ||
1837 | |||
1838 | return 0; | ||
1839 | } | ||
1840 | |||
1841 | static void le_scan_restart_work(struct work_struct *work) | ||
1842 | { | ||
1843 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
1844 | le_scan_restart.work); | ||
1845 | unsigned long timeout, duration, scan_start, now; | ||
1846 | u8 status; | ||
1847 | |||
1848 | BT_DBG("%s", hdev->name); | ||
1849 | |||
1850 | hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); | ||
1851 | if (status) { | ||
1852 | BT_ERR("Failed to restart LE scan: status %d", status); | ||
1853 | return; | ||
1854 | } | ||
1855 | |||
1856 | hci_dev_lock(hdev); | ||
1857 | |||
1858 | if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || | ||
1859 | !hdev->discovery.scan_start) | ||
1860 | goto unlock; | ||
1861 | |||
1862 | /* When the scan was started, hdev->le_scan_disable has been queued | ||
1863 | * after duration from scan_start. During scan restart this job | ||
1864 | * has been canceled, and we need to queue it again after proper | ||
1865 | * timeout, to make sure that scan does not run indefinitely. | ||
1866 | */ | ||
1867 | duration = hdev->discovery.scan_duration; | ||
1868 | scan_start = hdev->discovery.scan_start; | ||
1869 | now = jiffies; | ||
1870 | if (now - scan_start <= duration) { | ||
1871 | int elapsed; | ||
1872 | |||
1873 | if (now >= scan_start) | ||
1874 | elapsed = now - scan_start; | ||
1875 | else | ||
1876 | elapsed = ULONG_MAX - scan_start + now; | ||
1877 | |||
1878 | timeout = duration - elapsed; | ||
1879 | } else { | ||
1880 | timeout = 0; | ||
1881 | } | ||
1882 | |||
1883 | queue_delayed_work(hdev->req_workqueue, | ||
1884 | &hdev->le_scan_disable, timeout); | ||
1885 | |||
1886 | unlock: | ||
1887 | hci_dev_unlock(hdev); | ||
1888 | } | ||
1889 | |||
1890 | static void disable_advertising(struct hci_request *req) | ||
1891 | { | ||
1892 | u8 enable = 0x00; | ||
1893 | |||
1894 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); | ||
1895 | } | ||
1896 | |||
1897 | static int active_scan(struct hci_request *req, unsigned long opt) | ||
1898 | { | ||
1899 | uint16_t interval = opt; | ||
1900 | struct hci_dev *hdev = req->hdev; | ||
1901 | struct hci_cp_le_set_scan_param param_cp; | ||
1902 | struct hci_cp_le_set_scan_enable enable_cp; | ||
1903 | u8 own_addr_type; | ||
1904 | int err; | ||
1905 | |||
1906 | BT_DBG("%s", hdev->name); | ||
1907 | |||
1908 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { | ||
1909 | hci_dev_lock(hdev); | ||
1910 | |||
1911 | /* Don't let discovery abort an outgoing connection attempt | ||
1912 | * that's using directed advertising. | ||
1913 | */ | ||
1914 | if (hci_lookup_le_connect(hdev)) { | ||
1915 | hci_dev_unlock(hdev); | ||
1916 | return -EBUSY; | ||
1917 | } | ||
1918 | |||
1919 | cancel_adv_timeout(hdev); | ||
1920 | hci_dev_unlock(hdev); | ||
1921 | |||
1922 | disable_advertising(req); | ||
1923 | } | ||
1924 | |||
1925 | /* If controller is scanning, it means the background scanning is | ||
1926 | * running. Thus, we should temporarily stop it in order to set the | ||
1927 | * discovery scanning parameters. | ||
1928 | */ | ||
1929 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) | ||
1930 | hci_req_add_le_scan_disable(req); | ||
1931 | |||
1932 | /* All active scans will be done with either a resolvable private | ||
1933 | * address (when privacy feature has been enabled) or non-resolvable | ||
1934 | * private address. | ||
1935 | */ | ||
1936 | err = hci_update_random_address(req, true, &own_addr_type); | ||
1937 | if (err < 0) | ||
1938 | own_addr_type = ADDR_LE_DEV_PUBLIC; | ||
1939 | |||
1940 | memset(¶m_cp, 0, sizeof(param_cp)); | ||
1941 | param_cp.type = LE_SCAN_ACTIVE; | ||
1942 | param_cp.interval = cpu_to_le16(interval); | ||
1943 | param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); | ||
1944 | param_cp.own_address_type = own_addr_type; | ||
1945 | |||
1946 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), | ||
1947 | ¶m_cp); | ||
1948 | |||
1949 | memset(&enable_cp, 0, sizeof(enable_cp)); | ||
1950 | enable_cp.enable = LE_SCAN_ENABLE; | ||
1951 | enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | ||
1952 | |||
1953 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), | ||
1954 | &enable_cp); | ||
1955 | |||
1956 | return 0; | ||
1957 | } | ||
1958 | |||
1959 | static int interleaved_discov(struct hci_request *req, unsigned long opt) | ||
1960 | { | ||
1961 | int err; | ||
1962 | |||
1963 | BT_DBG("%s", req->hdev->name); | ||
1964 | |||
1965 | err = active_scan(req, opt); | ||
1966 | if (err) | ||
1967 | return err; | ||
1968 | |||
1969 | return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); | ||
1970 | } | ||
1971 | |||
1972 | static void start_discovery(struct hci_dev *hdev, u8 *status) | ||
1973 | { | ||
1974 | unsigned long timeout; | ||
1975 | |||
1976 | BT_DBG("%s type %u", hdev->name, hdev->discovery.type); | ||
1977 | |||
1978 | switch (hdev->discovery.type) { | ||
1979 | case DISCOV_TYPE_BREDR: | ||
1980 | if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) | ||
1981 | hci_req_sync(hdev, bredr_inquiry, | ||
1982 | DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, | ||
1983 | status); | ||
1984 | return; | ||
1985 | case DISCOV_TYPE_INTERLEAVED: | ||
1986 | /* When running simultaneous discovery, the LE scanning time | ||
1987 | * should occupy the whole discovery time since BR/EDR inquiry | ||
1988 | * and LE scanning are scheduled by the controller. | ||
1989 | * | ||
1990 | * For interleaved discovery, in comparison, BR/EDR inquiry | ||
1991 | * and LE scanning are done sequentially with separate | ||
1992 | * timeouts. | ||
1993 | */ | ||
1994 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, | ||
1995 | &hdev->quirks)) { | ||
1996 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | ||
1997 | /* During simultaneous discovery, we double the LE scan | ||
1998 | * interval. We must leave some time for the controller | ||
1999 | * to do BR/EDR inquiry. | ||
2000 | */ | ||
2001 | hci_req_sync(hdev, interleaved_discov, | ||
2002 | DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT, | ||
2003 | status); | ||
2004 | break; | ||
2005 | } | ||
2006 | |||
2007 | timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); | ||
2008 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | ||
2009 | HCI_CMD_TIMEOUT, status); | ||
2010 | break; | ||
2011 | case DISCOV_TYPE_LE: | ||
2012 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | ||
2013 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | ||
2014 | HCI_CMD_TIMEOUT, status); | ||
2015 | break; | ||
2016 | default: | ||
2017 | *status = HCI_ERROR_UNSPECIFIED; | ||
2018 | return; | ||
2019 | } | ||
2020 | |||
2021 | if (*status) | ||
2022 | return; | ||
2023 | |||
2024 | BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout)); | ||
2025 | |||
2026 | /* When service discovery is used and the controller has a | ||
2027 | * strict duplicate filter, it is important to remember the | ||
2028 | * start and duration of the scan. This is required for | ||
2029 | * restarting scanning during the discovery phase. | ||
2030 | */ | ||
2031 | if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && | ||
2032 | hdev->discovery.result_filtering) { | ||
2033 | hdev->discovery.scan_start = jiffies; | ||
2034 | hdev->discovery.scan_duration = timeout; | ||
2035 | } | ||
2036 | |||
2037 | queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, | ||
2038 | timeout); | ||
2039 | } | ||
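
The doubled interval used for simultaneous discovery is easier to see with concrete numbers. A sketch assuming DISCOV_LE_SCAN_INT is 0x0060 and DISCOV_LE_SCAN_WIN is 0x0030, both in 0.625 ms units; those values are likely the hci_core.h definitions of this era but are assumptions here:

    #include <stdio.h>

    #define DISCOV_LE_SCAN_INT 0x0060  /* assumed: 96 * 0.625 ms = 60 ms */
    #define DISCOV_LE_SCAN_WIN 0x0030  /* assumed: 48 * 0.625 ms = 30 ms */

    int main(void)
    {
            unsigned int interval = DISCOV_LE_SCAN_INT * 2; /* as queued above */

            /* With the window unchanged, scanning only occupies a quarter of
             * each 120 ms interval, leaving the controller idle time in which
             * to run BR/EDR inquiry concurrently.
             */
            printf("interval %.1f ms, window %.1f ms\n",
                   interval * 0.625, DISCOV_LE_SCAN_WIN * 0.625);
            return 0;
    }
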
2040 | |||
2041 | bool hci_req_stop_discovery(struct hci_request *req) | ||
2042 | { | ||
2043 | struct hci_dev *hdev = req->hdev; | ||
2044 | struct discovery_state *d = &hdev->discovery; | ||
2045 | struct hci_cp_remote_name_req_cancel cp; | ||
2046 | struct inquiry_entry *e; | ||
2047 | bool ret = false; | ||
2048 | |||
2049 | BT_DBG("%s state %u", hdev->name, hdev->discovery.state); | ||
2050 | |||
2051 | if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { | ||
2052 | if (test_bit(HCI_INQUIRY, &hdev->flags)) | ||
2053 | hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); | ||
2054 | |||
2055 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | ||
2056 | cancel_delayed_work(&hdev->le_scan_disable); | ||
2057 | hci_req_add_le_scan_disable(req); | ||
2058 | } | ||
2059 | |||
2060 | ret = true; | ||
2061 | } else { | ||
2062 | /* Passive scanning */ | ||
2063 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | ||
2064 | hci_req_add_le_scan_disable(req); | ||
2065 | ret = true; | ||
2066 | } | ||
2067 | } | ||
2068 | |||
2069 | /* No further actions needed for LE-only discovery */ | ||
2070 | if (d->type == DISCOV_TYPE_LE) | ||
2071 | return ret; | ||
2072 | |||
2073 | if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { | ||
2074 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, | ||
2075 | NAME_PENDING); | ||
2076 | if (!e) | ||
2077 | return ret; | ||
2078 | |||
2079 | bacpy(&cp.bdaddr, &e->data.bdaddr); | ||
2080 | hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), | ||
2081 | &cp); | ||
2082 | ret = true; | ||
2083 | } | ||
2084 | |||
2085 | return ret; | ||
2086 | } | ||
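
hci_req_stop_discovery() pairs each kind of in-flight discovery activity with the command that cancels it. A sketch of that mapping, plus the payload of the name-request cancel; the opcodes are taken from the Core Specification, not from this file, so treat them as assumptions:

    #include <stdint.h>

    /* State -> queued command, as implemented above (opcodes assumed):
     *   FINDING/STOPPING with HCI_INQUIRY set    -> Inquiry Cancel (0x0402)
     *   FINDING/STOPPING with HCI_LE_SCAN set    -> LE scan disable
     *   RESOLVING/STOPPING with a NAME_PENDING   -> Remote Name Request
     *   inquiry cache entry                         Cancel (0x041a)
     */

    typedef struct { uint8_t b[6]; } bdaddr_t;  /* stand-in definition */

    struct remote_name_req_cancel {  /* its only parameter is the peer */
            bdaddr_t bdaddr;         /* whose name resolution is pending */
    } __attribute__((packed));
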
2087 | |||
2088 | static int stop_discovery(struct hci_request *req, unsigned long opt) | ||
2089 | { | ||
2090 | hci_dev_lock(req->hdev); | ||
2091 | hci_req_stop_discovery(req); | ||
2092 | hci_dev_unlock(req->hdev); | ||
2093 | |||
2094 | return 0; | ||
2095 | } | ||
2096 | |||
2097 | static void discov_update(struct work_struct *work) | ||
2098 | { | ||
2099 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
2100 | discov_update); | ||
2101 | u8 status = 0; | ||
2102 | |||
2103 | switch (hdev->discovery.state) { | ||
2104 | case DISCOVERY_STARTING: | ||
2105 | start_discovery(hdev, &status); | ||
2106 | mgmt_start_discovery_complete(hdev, status); | ||
2107 | if (status) | ||
2108 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | ||
2109 | else | ||
2110 | hci_discovery_set_state(hdev, DISCOVERY_FINDING); | ||
2111 | break; | ||
2112 | case DISCOVERY_STOPPING: | ||
2113 | hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); | ||
2114 | mgmt_stop_discovery_complete(hdev, status); | ||
2115 | if (!status) | ||
2116 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | ||
2117 | break; | ||
2118 | case DISCOVERY_STOPPED: | ||
2119 | default: | ||
2120 | return; | ||
2121 | } | ||
2122 | } | ||
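
discov_update() drives a small state machine: a STARTING device ends up FINDING on success and back in STOPPED on failure, while a STOPPING device only reaches STOPPED once stop_discovery() succeeds. A compilable model of those transitions, with invented enum and helper names:

    #include <stdio.h>

    enum disc_state { STOPPED, STARTING, FINDING, STOPPING };

    /* next_state() is a hypothetical helper; ok means "the synchronous
     * request completed without a status error".
     */
    static enum disc_state next_state(enum disc_state s, int ok)
    {
            switch (s) {
            case STARTING:                  /* after start_discovery() */
                    return ok ? FINDING : STOPPED;
            case STOPPING:                  /* after stop_discovery() */
                    return ok ? STOPPED : STOPPING;
            default:                        /* STOPPED etc.: nothing to do */
                    return s;
            }
    }

    int main(void)
    {
            printf("%d\n", next_state(STARTING, 1)); /* prints 2 (FINDING) */
            return 0;
    }
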
2123 | |||
2124 | static void discov_off(struct work_struct *work) | ||
2125 | { | ||
2126 | struct hci_dev *hdev = container_of(work, struct hci_dev, | ||
2127 | discov_off.work); | ||
2128 | |||
2129 | BT_DBG("%s", hdev->name); | ||
2130 | |||
2131 | hci_dev_lock(hdev); | ||
2132 | |||
2133 | /* When the discoverable timeout triggers, just make sure | ||
2134 | * the limited discoverable flag is cleared. Even in the case | ||
2135 | * of a timeout triggered from general discoverable, it is | ||
2136 | * safe to unconditionally clear the flag. | ||
2137 | */ | ||
2138 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); | ||
2139 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); | ||
2140 | hdev->discov_timeout = 0; | ||
2141 | |||
2142 | hci_dev_unlock(hdev); | ||
2143 | |||
2144 | hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); | ||
2145 | mgmt_new_settings(hdev); | ||
2146 | } | ||
2147 | |||
2148 | static int powered_update_hci(struct hci_request *req, unsigned long opt) | ||
2149 | { | ||
2150 | struct hci_dev *hdev = req->hdev; | ||
2151 | u8 link_sec; | ||
2152 | |||
2153 | hci_dev_lock(hdev); | ||
2154 | |||
2155 | if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && | ||
2156 | !lmp_host_ssp_capable(hdev)) { | ||
2157 | u8 mode = 0x01; | ||
2158 | |||
2159 | hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); | ||
2160 | |||
2161 | if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { | ||
2162 | u8 support = 0x01; | ||
2163 | |||
2164 | hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, | ||
2165 | sizeof(support), &support); | ||
2166 | } | ||
2167 | } | ||
2168 | |||
2169 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && | ||
2170 | lmp_bredr_capable(hdev)) { | ||
2171 | struct hci_cp_write_le_host_supported cp; | ||
2172 | |||
2173 | cp.le = 0x01; | ||
2174 | cp.simul = 0x00; | ||
2175 | |||
2176 | /* Check first whether we already have the right | ||
2177 | * host state (host features set) | ||
2178 | */ | ||
2179 | if (cp.le != lmp_host_le_capable(hdev) || | ||
2180 | cp.simul != lmp_host_le_br_capable(hdev)) | ||
2181 | hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, | ||
2182 | sizeof(cp), &cp); | ||
2183 | } | ||
2184 | |||
2185 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { | ||
2186 | /* Make sure the controller has a good default for | ||
2187 | * advertising data. This also applies to the case | ||
2188 | * where BR/EDR was toggled during the AUTO_OFF phase. | ||
2189 | */ | ||
2190 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | ||
2191 | list_empty(&hdev->adv_instances)) { | ||
2192 | __hci_req_update_adv_data(req, 0x00); | ||
2193 | __hci_req_update_scan_rsp_data(req, 0x00); | ||
2194 | |||
2195 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) | ||
2196 | __hci_req_enable_advertising(req); | ||
2197 | } else if (!list_empty(&hdev->adv_instances)) { | ||
2198 | struct adv_info *adv_instance; | ||
2199 | |||
2200 | adv_instance = list_first_entry(&hdev->adv_instances, | ||
2201 | struct adv_info, list); | ||
2202 | __hci_req_schedule_adv_instance(req, | ||
2203 | adv_instance->instance, | ||
2204 | true); | ||
2205 | } | ||
2206 | } | ||
2207 | |||
2208 | link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); | ||
2209 | if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) | ||
2210 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, | ||
2211 | sizeof(link_sec), &link_sec); | ||
2212 | |||
2213 | if (lmp_bredr_capable(hdev)) { | ||
2214 | if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) | ||
2215 | __hci_req_write_fast_connectable(req, true); | ||
2216 | else | ||
2217 | __hci_req_write_fast_connectable(req, false); | ||
2218 | __hci_req_update_scan(req); | ||
2219 | __hci_req_update_class(req); | ||
2220 | __hci_req_update_name(req); | ||
2221 | __hci_req_update_eir(req); | ||
2222 | } | ||
2223 | |||
2224 | hci_dev_unlock(hdev); | ||
2225 | return 0; | ||
2226 | } | ||
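
The LE host-support update in powered_update_hci() is a read-compare-write: HCI_OP_WRITE_LE_HOST_SUPPORTED is only queued when the desired host feature bits differ from what the controller already reports via lmp_host_le_capable() and lmp_host_le_br_capable(). A minimal model of that comparison; the struct and helper names are invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct le_host_state {
            bool le;     /* host LE support */
            bool simul;  /* simultaneous LE and BR/EDR, always off here */
    };

    /* Hypothetical helper: report whether writing the host features
     * would actually change anything on the controller.
     */
    static bool needs_le_host_write(struct le_host_state want,
                                    struct le_host_state have)
    {
            return want.le != have.le || want.simul != have.simul;
    }

    int main(void)
    {
            struct le_host_state want = { .le = true,  .simul = false };
            struct le_host_state have = { .le = false, .simul = false };

            printf("queue write: %s\n",
                   needs_le_host_write(want, have) ? "yes" : "no");
            return 0;
    }
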
2227 | |||
2228 | int __hci_req_hci_power_on(struct hci_dev *hdev) | ||
2229 | { | ||
2230 | /* Register the available SMP channels (BR/EDR and LE) only when | ||
2231 | * successfully powering on the controller. This late | ||
2232 | * registration is required so that LE SMP can clearly decide if | ||
2233 | * the public address or static address is used. | ||
2234 | */ | ||
2235 | smp_register(hdev); | ||
2236 | |||
2237 | return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, | ||
2238 | NULL); | ||
2239 | } | ||
2240 | |||
2241 | void hci_request_setup(struct hci_dev *hdev) | ||
2242 | { | ||
2243 | INIT_WORK(&hdev->discov_update, discov_update); | ||
2244 | INIT_WORK(&hdev->bg_scan_update, bg_scan_update); | ||
2245 | INIT_WORK(&hdev->scan_update, scan_update_work); | ||
2246 | INIT_WORK(&hdev->connectable_update, connectable_update_work); | ||
2247 | INIT_WORK(&hdev->discoverable_update, discoverable_update_work); | ||
2248 | INIT_DELAYED_WORK(&hdev->discov_off, discov_off); | ||
2249 | INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); | ||
2250 | INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); | ||
2251 | INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); | ||
2252 | } | ||
2253 | |||
2254 | void hci_request_cancel_all(struct hci_dev *hdev) | ||
2255 | { | ||
2256 | hci_req_sync_cancel(hdev, ENODEV); | ||
2257 | |||
2258 | cancel_work_sync(&hdev->discov_update); | ||
2259 | cancel_work_sync(&hdev->bg_scan_update); | ||
2260 | cancel_work_sync(&hdev->scan_update); | ||
2261 | cancel_work_sync(&hdev->connectable_update); | ||
2262 | cancel_work_sync(&hdev->discoverable_update); | ||
2263 | cancel_delayed_work_sync(&hdev->discov_off); | ||
2264 | cancel_delayed_work_sync(&hdev->le_scan_disable); | ||
2265 | cancel_delayed_work_sync(&hdev->le_scan_restart); | ||
2266 | |||
2267 | if (hdev->adv_instance_timeout) { | ||
2268 | cancel_delayed_work_sync(&hdev->adv_instance_expire); | ||
2269 | hdev->adv_instance_timeout = 0; | ||
2270 | } | ||
2271 | } | ||