Bluetooth: Move discoverable timeout behind hdev->req_workqueue
net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

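/* Illustrative usage sketch (not part of the original file): the typical
 * build-and-run pattern for an asynchronous HCI request. The opcode and
 * payload below are only examples; hci_req_add() is defined further down
 * in this file.
 *
 *	struct hci_request req;
 *	u8 enable = 0x01;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 *	if (hci_req_run(&req, NULL) < 0)
 *		BT_ERR("request failed to run");
 */
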
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

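/* Illustrative sketch (not in the original file): sending a single command
 * synchronously and consuming the Command Complete payload. The opcode and
 * response struct are standard hci.h definitions; error handling is the
 * caller's responsibility.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	// skb->data now holds struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */
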
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

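/* Illustrative sketch (not in the original file): the request-builder
 * callback pattern used with hci_req_sync(). Builders such as update_scan()
 * and bredr_inquiry() later in this file follow exactly this shape; the
 * builder only queues commands, and hci_req_sync() runs them and waits.
 *
 *	static int my_builder(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = (u8)opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	hci_req_sync(hdev, my_builder, SCAN_PAGE, HCI_CMD_TIMEOUT, &status);
 */
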
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

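/* Illustrative usage (not in the original file): refresh the advertising
 * data for whatever instance is currently active. HCI_ADV_CURRENT is the
 * "resolve to the current instance" sentinel from hci_request.h, as used
 * throughout this file.
 *
 *	hci_req_update_adv_data(hdev, HCI_ADV_CURRENT);
 */
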
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}

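/* Illustrative sketch (not in the original file): scheduling a previously
 * added advertising instance from a fresh request context. The instance
 * number (1) is an example value; callers are expected to hold hdev->lock
 * where the surrounding code does.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	__hci_req_schedule_adv_instance(&req, 1, true);
 *	hci_req_run(&req, NULL);
 */
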
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

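/* Illustrative usage (not in the original file): tearing down a connection
 * with a standard reason code from hci.h. The helper picks the right HCI
 * command for the current connection state.
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */
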
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

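/* Worked example (not in the original file) for the elapsed-time math
 * above: with scan_start == ULONG_MAX - 99 and now == 100 after a jiffies
 * wraparound, the else-branch yields
 * elapsed = ULONG_MAX - (ULONG_MAX - 99) + 100 = 199 ticks, so a scan with
 * duration 1000 is re-armed for the remaining timeout of 801 ticks.
 */
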
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

1925
c366f555
JH
1926static void discov_off(struct work_struct *work)
1927{
1928 struct hci_dev *hdev = container_of(work, struct hci_dev,
1929 discov_off.work);
1930
1931 BT_DBG("%s", hdev->name);
1932
1933 hci_dev_lock(hdev);
1934
1935 /* When discoverable timeout triggers, then just make sure
1936 * the limited discoverable flag is cleared. Even in the case
1937 * of a timeout triggered from general discoverable, it is
1938 * safe to unconditionally clear the flag.
1939 */
1940 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1941 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1942 hdev->discov_timeout = 0;
1943
1944 hci_dev_unlock(hdev);
1945
1946 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
1947 mgmt_new_settings(hdev);
1948}
1949
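/* Note on the commit this file state belongs to ("Bluetooth: Move
 * discoverable timeout behind hdev->req_workqueue"): discov_off is a
 * delayed work item, so the discoverable timeout is expected to be armed
 * on hdev->req_workqueue by the mgmt code, along the lines of the
 * (illustrative) call below.
 *
 *	queue_delayed_work(hdev->req_workqueue, &hdev->discov_off,
 *			   msecs_to_jiffies(hdev->discov_timeout * 1000));
 */
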
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}