2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <net/bluetooth/bluetooth.h>
25 #include <net/bluetooth/hci_core.h>
26 #include <net/bluetooth/mgmt.h>
29 #include "hci_request.h"
31 #define HCI_REQ_DONE 0
32 #define HCI_REQ_PEND 1
33 #define HCI_REQ_CANCELED 2
35 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
37 skb_queue_head_init(&req
->cmd_q
);
42 static int req_run(struct hci_request
*req
, hci_req_complete_t complete
,
43 hci_req_complete_skb_t complete_skb
)
45 struct hci_dev
*hdev
= req
->hdev
;
49 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
51 /* If an error occurred during request building, remove all HCI
52 * commands queued on the HCI request queue.
55 skb_queue_purge(&req
->cmd_q
);
59 /* Do not allow empty requests */
60 if (skb_queue_empty(&req
->cmd_q
))
63 skb
= skb_peek_tail(&req
->cmd_q
);
65 bt_cb(skb
)->hci
.req_complete
= complete
;
66 } else if (complete_skb
) {
67 bt_cb(skb
)->hci
.req_complete_skb
= complete_skb
;
68 bt_cb(skb
)->hci
.req_flags
|= HCI_REQ_SKB
;
71 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
72 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
73 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
75 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
80 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
82 return req_run(req
, complete
, NULL
);
85 int hci_req_run_skb(struct hci_request
*req
, hci_req_complete_skb_t complete
)
87 return req_run(req
, NULL
, complete
);
90 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
, u16 opcode
,
93 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
95 if (hdev
->req_status
== HCI_REQ_PEND
) {
96 hdev
->req_result
= result
;
97 hdev
->req_status
= HCI_REQ_DONE
;
99 hdev
->req_skb
= skb_get(skb
);
100 wake_up_interruptible(&hdev
->req_wait_q
);
104 void hci_req_sync_cancel(struct hci_dev
*hdev
, int err
)
106 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
108 if (hdev
->req_status
== HCI_REQ_PEND
) {
109 hdev
->req_result
= err
;
110 hdev
->req_status
= HCI_REQ_CANCELED
;
111 wake_up_interruptible(&hdev
->req_wait_q
);
115 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
116 const void *param
, u8 event
, u32 timeout
)
118 DECLARE_WAITQUEUE(wait
, current
);
119 struct hci_request req
;
123 BT_DBG("%s", hdev
->name
);
125 hci_req_init(&req
, hdev
);
127 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
129 hdev
->req_status
= HCI_REQ_PEND
;
131 add_wait_queue(&hdev
->req_wait_q
, &wait
);
132 set_current_state(TASK_INTERRUPTIBLE
);
134 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
136 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
137 set_current_state(TASK_RUNNING
);
141 schedule_timeout(timeout
);
143 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
145 if (signal_pending(current
))
146 return ERR_PTR(-EINTR
);
148 switch (hdev
->req_status
) {
150 err
= -bt_to_errno(hdev
->req_result
);
153 case HCI_REQ_CANCELED
:
154 err
= -hdev
->req_result
;
162 hdev
->req_status
= hdev
->req_result
= 0;
164 hdev
->req_skb
= NULL
;
166 BT_DBG("%s end: err %d", hdev
->name
, err
);
174 return ERR_PTR(-ENODATA
);
178 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
180 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
181 const void *param
, u32 timeout
)
183 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
185 EXPORT_SYMBOL(__hci_cmd_sync
);
187 /* Execute request and wait for completion. */
188 int __hci_req_sync(struct hci_dev
*hdev
, int (*func
)(struct hci_request
*req
,
190 unsigned long opt
, u32 timeout
, u8
*hci_status
)
192 struct hci_request req
;
193 DECLARE_WAITQUEUE(wait
, current
);
196 BT_DBG("%s start", hdev
->name
);
198 hci_req_init(&req
, hdev
);
200 hdev
->req_status
= HCI_REQ_PEND
;
202 err
= func(&req
, opt
);
205 *hci_status
= HCI_ERROR_UNSPECIFIED
;
209 add_wait_queue(&hdev
->req_wait_q
, &wait
);
210 set_current_state(TASK_INTERRUPTIBLE
);
212 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
214 hdev
->req_status
= 0;
216 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
217 set_current_state(TASK_RUNNING
);
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
224 if (err
== -ENODATA
) {
231 *hci_status
= HCI_ERROR_UNSPECIFIED
;
236 schedule_timeout(timeout
);
238 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
240 if (signal_pending(current
))
243 switch (hdev
->req_status
) {
245 err
= -bt_to_errno(hdev
->req_result
);
247 *hci_status
= hdev
->req_result
;
250 case HCI_REQ_CANCELED
:
251 err
= -hdev
->req_result
;
253 *hci_status
= HCI_ERROR_UNSPECIFIED
;
259 *hci_status
= HCI_ERROR_UNSPECIFIED
;
263 hdev
->req_status
= hdev
->req_result
= 0;
265 BT_DBG("%s end: err %d", hdev
->name
, err
);
270 int hci_req_sync(struct hci_dev
*hdev
, int (*req
)(struct hci_request
*req
,
272 unsigned long opt
, u32 timeout
, u8
*hci_status
)
276 if (!test_bit(HCI_UP
, &hdev
->flags
))
279 /* Serialize all requests */
280 hci_req_sync_lock(hdev
);
281 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
, hci_status
);
282 hci_req_sync_unlock(hdev
);
287 struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
290 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
291 struct hci_command_hdr
*hdr
;
294 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
298 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
299 hdr
->opcode
= cpu_to_le16(opcode
);
303 memcpy(skb_put(skb
, plen
), param
, plen
);
305 BT_DBG("skb len %d", skb
->len
);
307 hci_skb_pkt_type(skb
) = HCI_COMMAND_PKT
;
308 hci_skb_opcode(skb
) = opcode
;
313 /* Queue a command to an asynchronous HCI request */
314 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
315 const void *param
, u8 event
)
317 struct hci_dev
*hdev
= req
->hdev
;
320 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
322 /* If an error occurred during request building, there is no point in
323 * queueing the HCI command. We can simply return.
328 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
330 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
336 if (skb_queue_empty(&req
->cmd_q
))
337 bt_cb(skb
)->hci
.req_flags
|= HCI_REQ_START
;
339 bt_cb(skb
)->hci
.req_event
= event
;
341 skb_queue_tail(&req
->cmd_q
, skb
);
344 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
347 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
350 /* This function controls the background scanning based on hdev->pend_le_conns
351 * list. If there are pending LE connection we start the background scanning,
352 * otherwise we stop it.
354 * This function requires the caller holds hdev->lock.
356 static void __hci_update_background_scan(struct hci_request
*req
)
358 struct hci_dev
*hdev
= req
->hdev
;
360 if (!test_bit(HCI_UP
, &hdev
->flags
) ||
361 test_bit(HCI_INIT
, &hdev
->flags
) ||
362 hci_dev_test_flag(hdev
, HCI_SETUP
) ||
363 hci_dev_test_flag(hdev
, HCI_CONFIG
) ||
364 hci_dev_test_flag(hdev
, HCI_AUTO_OFF
) ||
365 hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
368 /* No point in doing scanning if LE support hasn't been enabled */
369 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
372 /* If discovery is active don't interfere with it */
373 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
)
376 /* Reset RSSI and UUID filters when starting background scanning
377 * since these filters are meant for service discovery only.
379 * The Start Discovery and Start Service Discovery operations
380 * ensure to set proper values for RSSI threshold and UUID
381 * filter list. So it is safe to just reset them here.
383 hci_discovery_filter_clear(hdev
);
385 if (list_empty(&hdev
->pend_le_conns
) &&
386 list_empty(&hdev
->pend_le_reports
)) {
387 /* If there is no pending LE connections or devices
388 * to be scanned for, we should stop the background
392 /* If controller is not scanning we are done. */
393 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
396 hci_req_add_le_scan_disable(req
);
398 BT_DBG("%s stopping background scanning", hdev
->name
);
400 /* If there is at least one pending LE connection, we should
401 * keep the background scan running.
404 /* If controller is connecting, we should not start scanning
405 * since some controllers are not able to scan and connect at
408 if (hci_lookup_le_connect(hdev
))
411 /* If controller is currently scanning, we stop it to ensure we
412 * don't miss any advertising (due to duplicates filter).
414 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
415 hci_req_add_le_scan_disable(req
);
417 hci_req_add_le_passive_scan(req
);
419 BT_DBG("%s starting background scanning", hdev
->name
);
423 void hci_req_add_le_scan_disable(struct hci_request
*req
)
425 struct hci_cp_le_set_scan_enable cp
;
427 memset(&cp
, 0, sizeof(cp
));
428 cp
.enable
= LE_SCAN_DISABLE
;
429 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
432 static void add_to_white_list(struct hci_request
*req
,
433 struct hci_conn_params
*params
)
435 struct hci_cp_le_add_to_white_list cp
;
437 cp
.bdaddr_type
= params
->addr_type
;
438 bacpy(&cp
.bdaddr
, ¶ms
->addr
);
440 hci_req_add(req
, HCI_OP_LE_ADD_TO_WHITE_LIST
, sizeof(cp
), &cp
);
443 static u8
update_white_list(struct hci_request
*req
)
445 struct hci_dev
*hdev
= req
->hdev
;
446 struct hci_conn_params
*params
;
447 struct bdaddr_list
*b
;
448 uint8_t white_list_entries
= 0;
450 /* Go through the current white list programmed into the
451 * controller one by one and check if that address is still
452 * in the list of pending connections or list of devices to
453 * report. If not present in either list, then queue the
454 * command to remove it from the controller.
456 list_for_each_entry(b
, &hdev
->le_white_list
, list
) {
457 struct hci_cp_le_del_from_white_list cp
;
459 if (hci_pend_le_action_lookup(&hdev
->pend_le_conns
,
460 &b
->bdaddr
, b
->bdaddr_type
) ||
461 hci_pend_le_action_lookup(&hdev
->pend_le_reports
,
462 &b
->bdaddr
, b
->bdaddr_type
)) {
463 white_list_entries
++;
467 cp
.bdaddr_type
= b
->bdaddr_type
;
468 bacpy(&cp
.bdaddr
, &b
->bdaddr
);
470 hci_req_add(req
, HCI_OP_LE_DEL_FROM_WHITE_LIST
,
474 /* Since all no longer valid white list entries have been
475 * removed, walk through the list of pending connections
476 * and ensure that any new device gets programmed into
479 * If the list of the devices is larger than the list of
480 * available white list entries in the controller, then
481 * just abort and return filer policy value to not use the
484 list_for_each_entry(params
, &hdev
->pend_le_conns
, action
) {
485 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
486 ¶ms
->addr
, params
->addr_type
))
489 if (white_list_entries
>= hdev
->le_white_list_size
) {
490 /* Select filter policy to accept all advertising */
494 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
495 params
->addr_type
)) {
496 /* White list can not be used with RPAs */
500 white_list_entries
++;
501 add_to_white_list(req
, params
);
504 /* After adding all new pending connections, walk through
505 * the list of pending reports and also add these to the
506 * white list if there is still space.
508 list_for_each_entry(params
, &hdev
->pend_le_reports
, action
) {
509 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
510 ¶ms
->addr
, params
->addr_type
))
513 if (white_list_entries
>= hdev
->le_white_list_size
) {
514 /* Select filter policy to accept all advertising */
518 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
519 params
->addr_type
)) {
520 /* White list can not be used with RPAs */
524 white_list_entries
++;
525 add_to_white_list(req
, params
);
528 /* Select filter policy to use white list */
532 void hci_req_add_le_passive_scan(struct hci_request
*req
)
534 struct hci_cp_le_set_scan_param param_cp
;
535 struct hci_cp_le_set_scan_enable enable_cp
;
536 struct hci_dev
*hdev
= req
->hdev
;
540 /* Set require_privacy to false since no SCAN_REQ are send
541 * during passive scanning. Not using an non-resolvable address
542 * here is important so that peer devices using direct
543 * advertising with our address will be correctly reported
546 if (hci_update_random_address(req
, false, &own_addr_type
))
549 /* Adding or removing entries from the white list must
550 * happen before enabling scanning. The controller does
551 * not allow white list modification while scanning.
553 filter_policy
= update_white_list(req
);
555 /* When the controller is using random resolvable addresses and
556 * with that having LE privacy enabled, then controllers with
557 * Extended Scanner Filter Policies support can now enable support
558 * for handling directed advertising.
560 * So instead of using filter polices 0x00 (no whitelist)
561 * and 0x01 (whitelist enabled) use the new filter policies
562 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
564 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
) &&
565 (hdev
->le_features
[0] & HCI_LE_EXT_SCAN_POLICY
))
566 filter_policy
|= 0x02;
568 memset(¶m_cp
, 0, sizeof(param_cp
));
569 param_cp
.type
= LE_SCAN_PASSIVE
;
570 param_cp
.interval
= cpu_to_le16(hdev
->le_scan_interval
);
571 param_cp
.window
= cpu_to_le16(hdev
->le_scan_window
);
572 param_cp
.own_address_type
= own_addr_type
;
573 param_cp
.filter_policy
= filter_policy
;
574 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
577 memset(&enable_cp
, 0, sizeof(enable_cp
));
578 enable_cp
.enable
= LE_SCAN_ENABLE
;
579 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
580 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
584 static u8
get_current_adv_instance(struct hci_dev
*hdev
)
586 /* The "Set Advertising" setting supersedes the "Add Advertising"
587 * setting. Here we set the advertising data based on which
588 * setting was set. When neither apply, default to the global settings,
589 * represented by instance "0".
591 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
592 !hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
593 return hdev
->cur_adv_instance
;
598 static u8
get_cur_adv_instance_scan_rsp_len(struct hci_dev
*hdev
)
600 u8 instance
= get_current_adv_instance(hdev
);
601 struct adv_info
*adv_instance
;
603 /* Ignore instance 0 */
604 if (instance
== 0x00)
607 adv_instance
= hci_find_adv_instance(hdev
, instance
);
611 /* TODO: Take into account the "appearance" and "local-name" flags here.
612 * These are currently being ignored as they are not supported.
614 return adv_instance
->scan_rsp_len
;
617 void __hci_req_disable_advertising(struct hci_request
*req
)
621 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
624 static u32
get_adv_instance_flags(struct hci_dev
*hdev
, u8 instance
)
627 struct adv_info
*adv_instance
;
629 if (instance
== 0x00) {
630 /* Instance 0 always manages the "Tx Power" and "Flags"
633 flags
= MGMT_ADV_FLAG_TX_POWER
| MGMT_ADV_FLAG_MANAGED_FLAGS
;
635 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
636 * corresponds to the "connectable" instance flag.
638 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
))
639 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
644 adv_instance
= hci_find_adv_instance(hdev
, instance
);
646 /* Return 0 when we got an invalid instance identifier. */
650 return adv_instance
->flags
;
653 void __hci_req_enable_advertising(struct hci_request
*req
)
655 struct hci_dev
*hdev
= req
->hdev
;
656 struct hci_cp_le_set_adv_param cp
;
657 u8 own_addr_type
, enable
= 0x01;
662 if (hci_conn_num(hdev
, LE_LINK
) > 0)
665 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
666 __hci_req_disable_advertising(req
);
668 /* Clear the HCI_LE_ADV bit temporarily so that the
669 * hci_update_random_address knows that it's safe to go ahead
670 * and write a new random address. The flag will be set back on
671 * as soon as the SET_ADV_ENABLE HCI command completes.
673 hci_dev_clear_flag(hdev
, HCI_LE_ADV
);
675 instance
= get_current_adv_instance(hdev
);
676 flags
= get_adv_instance_flags(hdev
, instance
);
678 /* If the "connectable" instance flag was not set, then choose between
679 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
681 connectable
= (flags
& MGMT_ADV_FLAG_CONNECTABLE
) ||
682 mgmt_get_connectable(hdev
);
684 /* Set require_privacy to true only when non-connectable
685 * advertising is used. In that case it is fine to use a
686 * non-resolvable private address.
688 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
691 memset(&cp
, 0, sizeof(cp
));
692 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
693 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
696 cp
.type
= LE_ADV_IND
;
697 else if (get_cur_adv_instance_scan_rsp_len(hdev
))
698 cp
.type
= LE_ADV_SCAN_IND
;
700 cp
.type
= LE_ADV_NONCONN_IND
;
702 cp
.own_address_type
= own_addr_type
;
703 cp
.channel_map
= hdev
->le_adv_channel_map
;
705 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
707 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
710 static u8
create_default_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
715 name_len
= strlen(hdev
->dev_name
);
717 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
719 if (name_len
> max_len
) {
721 ptr
[1] = EIR_NAME_SHORT
;
723 ptr
[1] = EIR_NAME_COMPLETE
;
725 ptr
[0] = name_len
+ 1;
727 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
729 ad_len
+= (name_len
+ 2);
730 ptr
+= (name_len
+ 2);
736 static u8
create_instance_scan_rsp_data(struct hci_dev
*hdev
, u8 instance
,
739 struct adv_info
*adv_instance
;
741 adv_instance
= hci_find_adv_instance(hdev
, instance
);
745 /* TODO: Set the appropriate entries based on advertising instance flags
746 * here once flags other than 0 are supported.
748 memcpy(ptr
, adv_instance
->scan_rsp_data
,
749 adv_instance
->scan_rsp_len
);
751 return adv_instance
->scan_rsp_len
;
754 static void update_inst_scan_rsp_data(struct hci_request
*req
, u8 instance
)
756 struct hci_dev
*hdev
= req
->hdev
;
757 struct hci_cp_le_set_scan_rsp_data cp
;
760 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
763 memset(&cp
, 0, sizeof(cp
));
766 len
= create_instance_scan_rsp_data(hdev
, instance
, cp
.data
);
768 len
= create_default_scan_rsp_data(hdev
, cp
.data
);
770 if (hdev
->scan_rsp_data_len
== len
&&
771 !memcmp(cp
.data
, hdev
->scan_rsp_data
, len
))
774 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
775 hdev
->scan_rsp_data_len
= len
;
779 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
782 void __hci_req_update_scan_rsp_data(struct hci_request
*req
, int instance
)
784 if (instance
== HCI_ADV_CURRENT
)
785 instance
= get_current_adv_instance(req
->hdev
);
787 update_inst_scan_rsp_data(req
, get_current_adv_instance(req
->hdev
));
790 static u8
create_instance_adv_data(struct hci_dev
*hdev
, u8 instance
, u8
*ptr
)
792 struct adv_info
*adv_instance
= NULL
;
793 u8 ad_len
= 0, flags
= 0;
796 /* Return 0 when the current instance identifier is invalid. */
798 adv_instance
= hci_find_adv_instance(hdev
, instance
);
803 instance_flags
= get_adv_instance_flags(hdev
, instance
);
805 /* The Add Advertising command allows userspace to set both the general
806 * and limited discoverable flags.
808 if (instance_flags
& MGMT_ADV_FLAG_DISCOV
)
809 flags
|= LE_AD_GENERAL
;
811 if (instance_flags
& MGMT_ADV_FLAG_LIMITED_DISCOV
)
812 flags
|= LE_AD_LIMITED
;
814 if (flags
|| (instance_flags
& MGMT_ADV_FLAG_MANAGED_FLAGS
)) {
815 /* If a discovery flag wasn't provided, simply use the global
819 flags
|= mgmt_get_adv_discov_flags(hdev
);
821 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
822 flags
|= LE_AD_NO_BREDR
;
824 /* If flags would still be empty, then there is no need to
825 * include the "Flags" AD field".
838 memcpy(ptr
, adv_instance
->adv_data
,
839 adv_instance
->adv_data_len
);
840 ad_len
+= adv_instance
->adv_data_len
;
841 ptr
+= adv_instance
->adv_data_len
;
844 /* Provide Tx Power only if we can provide a valid value for it */
845 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
&&
846 (instance_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
848 ptr
[1] = EIR_TX_POWER
;
849 ptr
[2] = (u8
)hdev
->adv_tx_power
;
858 static void update_inst_adv_data(struct hci_request
*req
, u8 instance
)
860 struct hci_dev
*hdev
= req
->hdev
;
861 struct hci_cp_le_set_adv_data cp
;
864 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
867 memset(&cp
, 0, sizeof(cp
));
869 len
= create_instance_adv_data(hdev
, instance
, cp
.data
);
871 /* There's nothing to do if the data hasn't changed */
872 if (hdev
->adv_data_len
== len
&&
873 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
876 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
877 hdev
->adv_data_len
= len
;
881 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
884 void __hci_req_update_adv_data(struct hci_request
*req
, int instance
)
886 if (instance
== HCI_ADV_CURRENT
)
887 instance
= get_current_adv_instance(req
->hdev
);
889 update_inst_adv_data(req
, instance
);
892 int hci_req_update_adv_data(struct hci_dev
*hdev
, int instance
)
894 struct hci_request req
;
896 hci_req_init(&req
, hdev
);
897 __hci_req_update_adv_data(&req
, instance
);
899 return hci_req_run(&req
, NULL
);
902 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
904 BT_DBG("%s status %u", hdev
->name
, status
);
907 void hci_req_reenable_advertising(struct hci_dev
*hdev
)
909 struct hci_request req
;
912 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
913 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
916 instance
= get_current_adv_instance(hdev
);
918 hci_req_init(&req
, hdev
);
921 __hci_req_schedule_adv_instance(&req
, instance
, true);
923 __hci_req_update_adv_data(&req
, HCI_ADV_CURRENT
);
924 __hci_req_update_scan_rsp_data(&req
, HCI_ADV_CURRENT
);
925 __hci_req_enable_advertising(&req
);
928 hci_req_run(&req
, adv_enable_complete
);
931 static void adv_timeout_expire(struct work_struct
*work
)
933 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
934 adv_instance_expire
.work
);
936 struct hci_request req
;
939 BT_DBG("%s", hdev
->name
);
943 hdev
->adv_instance_timeout
= 0;
945 instance
= get_current_adv_instance(hdev
);
946 if (instance
== 0x00)
949 hci_req_init(&req
, hdev
);
951 hci_req_clear_adv_instance(hdev
, &req
, instance
, false);
953 if (list_empty(&hdev
->adv_instances
))
954 __hci_req_disable_advertising(&req
);
956 if (!skb_queue_empty(&req
.cmd_q
))
957 hci_req_run(&req
, NULL
);
960 hci_dev_unlock(hdev
);
963 int __hci_req_schedule_adv_instance(struct hci_request
*req
, u8 instance
,
966 struct hci_dev
*hdev
= req
->hdev
;
967 struct adv_info
*adv_instance
= NULL
;
970 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
971 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
974 if (hdev
->adv_instance_timeout
)
977 adv_instance
= hci_find_adv_instance(hdev
, instance
);
981 /* A zero timeout means unlimited advertising. As long as there is
982 * only one instance, duration should be ignored. We still set a timeout
983 * in case further instances are being added later on.
985 * If the remaining lifetime of the instance is more than the duration
986 * then the timeout corresponds to the duration, otherwise it will be
987 * reduced to the remaining instance lifetime.
989 if (adv_instance
->timeout
== 0 ||
990 adv_instance
->duration
<= adv_instance
->remaining_time
)
991 timeout
= adv_instance
->duration
;
993 timeout
= adv_instance
->remaining_time
;
995 /* The remaining time is being reduced unless the instance is being
996 * advertised without time limit.
998 if (adv_instance
->timeout
)
999 adv_instance
->remaining_time
=
1000 adv_instance
->remaining_time
- timeout
;
1002 hdev
->adv_instance_timeout
= timeout
;
1003 queue_delayed_work(hdev
->req_workqueue
,
1004 &hdev
->adv_instance_expire
,
1005 msecs_to_jiffies(timeout
* 1000));
1007 /* If we're just re-scheduling the same instance again then do not
1008 * execute any HCI commands. This happens when a single instance is
1011 if (!force
&& hdev
->cur_adv_instance
== instance
&&
1012 hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1015 hdev
->cur_adv_instance
= instance
;
1016 __hci_req_update_adv_data(req
, HCI_ADV_CURRENT
);
1017 __hci_req_update_scan_rsp_data(req
, HCI_ADV_CURRENT
);
1018 __hci_req_enable_advertising(req
);
1023 static void cancel_adv_timeout(struct hci_dev
*hdev
)
1025 if (hdev
->adv_instance_timeout
) {
1026 hdev
->adv_instance_timeout
= 0;
1027 cancel_delayed_work(&hdev
->adv_instance_expire
);
1031 /* For a single instance:
1032 * - force == true: The instance will be removed even when its remaining
1033 * lifetime is not zero.
1034 * - force == false: the instance will be deactivated but kept stored unless
1035 * the remaining lifetime is zero.
1037 * For instance == 0x00:
1038 * - force == true: All instances will be removed regardless of their timeout
1040 * - force == false: Only instances that have a timeout will be removed.
1042 void hci_req_clear_adv_instance(struct hci_dev
*hdev
, struct hci_request
*req
,
1043 u8 instance
, bool force
)
1045 struct adv_info
*adv_instance
, *n
, *next_instance
= NULL
;
1049 /* Cancel any timeout concerning the removed instance(s). */
1050 if (!instance
|| hdev
->cur_adv_instance
== instance
)
1051 cancel_adv_timeout(hdev
);
1053 /* Get the next instance to advertise BEFORE we remove
1054 * the current one. This can be the same instance again
1055 * if there is only one instance.
1057 if (instance
&& hdev
->cur_adv_instance
== instance
)
1058 next_instance
= hci_get_next_instance(hdev
, instance
);
1060 if (instance
== 0x00) {
1061 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
,
1063 if (!(force
|| adv_instance
->timeout
))
1066 rem_inst
= adv_instance
->instance
;
1067 err
= hci_remove_adv_instance(hdev
, rem_inst
);
1069 mgmt_advertising_removed(NULL
, hdev
, rem_inst
);
1071 hdev
->cur_adv_instance
= 0x00;
1073 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1075 if (force
|| (adv_instance
&& adv_instance
->timeout
&&
1076 !adv_instance
->remaining_time
)) {
1077 /* Don't advertise a removed instance. */
1078 if (next_instance
&&
1079 next_instance
->instance
== instance
)
1080 next_instance
= NULL
;
1082 err
= hci_remove_adv_instance(hdev
, instance
);
1084 mgmt_advertising_removed(NULL
, hdev
, instance
);
1088 if (list_empty(&hdev
->adv_instances
)) {
1089 hdev
->cur_adv_instance
= 0x00;
1090 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
1093 if (!req
|| !hdev_is_powered(hdev
) ||
1094 hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1098 __hci_req_schedule_adv_instance(req
, next_instance
->instance
,
1102 static void set_random_addr(struct hci_request
*req
, bdaddr_t
*rpa
)
1104 struct hci_dev
*hdev
= req
->hdev
;
1106 /* If we're advertising or initiating an LE connection we can't
1107 * go ahead and change the random address at this time. This is
1108 * because the eventual initiator address used for the
1109 * subsequently created connection will be undefined (some
1110 * controllers use the new address and others the one we had
1111 * when the operation started).
1113 * In this kind of scenario skip the update and let the random
1114 * address be updated at the next cycle.
1116 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
) ||
1117 hci_lookup_le_connect(hdev
)) {
1118 BT_DBG("Deferring random address update");
1119 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1123 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6, rpa
);
1126 int hci_update_random_address(struct hci_request
*req
, bool require_privacy
,
1129 struct hci_dev
*hdev
= req
->hdev
;
1132 /* If privacy is enabled use a resolvable private address. If
1133 * current RPA has expired or there is something else than
1134 * the current RPA in use, then generate a new one.
1136 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
1139 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
1141 if (!hci_dev_test_and_clear_flag(hdev
, HCI_RPA_EXPIRED
) &&
1142 !bacmp(&hdev
->random_addr
, &hdev
->rpa
))
1145 err
= smp_generate_rpa(hdev
, hdev
->irk
, &hdev
->rpa
);
1147 BT_ERR("%s failed to generate new RPA", hdev
->name
);
1151 set_random_addr(req
, &hdev
->rpa
);
1153 to
= msecs_to_jiffies(hdev
->rpa_timeout
* 1000);
1154 queue_delayed_work(hdev
->workqueue
, &hdev
->rpa_expired
, to
);
1159 /* In case of required privacy without resolvable private address,
1160 * use an non-resolvable private address. This is useful for active
1161 * scanning and non-connectable advertising.
1163 if (require_privacy
) {
1167 /* The non-resolvable private address is generated
1168 * from random six bytes with the two most significant
1171 get_random_bytes(&nrpa
, 6);
1174 /* The non-resolvable private address shall not be
1175 * equal to the public address.
1177 if (bacmp(&hdev
->bdaddr
, &nrpa
))
1181 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
1182 set_random_addr(req
, &nrpa
);
1186 /* If forcing static address is in use or there is no public
1187 * address use the static address as random address (but skip
1188 * the HCI command if the current random address is already the
1191 * In case BR/EDR has been disabled on a dual-mode controller
1192 * and a static address has been configured, then use that
1193 * address instead of the public BR/EDR address.
1195 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
1196 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
1197 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
1198 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
1199 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
1200 if (bacmp(&hdev
->static_addr
, &hdev
->random_addr
))
1201 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6,
1202 &hdev
->static_addr
);
1206 /* Neither privacy nor static address is being used so use a
1209 *own_addr_type
= ADDR_LE_DEV_PUBLIC
;
1214 static bool disconnected_whitelist_entries(struct hci_dev
*hdev
)
1216 struct bdaddr_list
*b
;
1218 list_for_each_entry(b
, &hdev
->whitelist
, list
) {
1219 struct hci_conn
*conn
;
1221 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &b
->bdaddr
);
1225 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
1232 void __hci_req_update_scan(struct hci_request
*req
)
1234 struct hci_dev
*hdev
= req
->hdev
;
1237 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1240 if (!hdev_is_powered(hdev
))
1243 if (mgmt_powering_down(hdev
))
1246 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
) ||
1247 disconnected_whitelist_entries(hdev
))
1250 scan
= SCAN_DISABLED
;
1252 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
1253 scan
|= SCAN_INQUIRY
;
1255 if (test_bit(HCI_PSCAN
, &hdev
->flags
) == !!(scan
& SCAN_PAGE
) &&
1256 test_bit(HCI_ISCAN
, &hdev
->flags
) == !!(scan
& SCAN_INQUIRY
))
1259 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1262 static int update_scan(struct hci_request
*req
, unsigned long opt
)
1264 hci_dev_lock(req
->hdev
);
1265 __hci_req_update_scan(req
);
1266 hci_dev_unlock(req
->hdev
);
/* Deferred-work handler: synchronously run the scan-mode update request. */
static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
/* Queue the HCI command(s) needed to abort @conn, chosen by its current
 * state:
 *   - BT_CONNECTED/BT_CONFIG: disconnect (or disconnect physical link for
 *     AMP) and move the state to BT_DISCONN;
 *   - BT_CONNECT: cancel the pending LE or ACL connection attempt;
 *   - BT_CONNECT2: reject the incoming ACL or (e)SCO connection request;
 *   - otherwise: just mark the connection closed.
 *
 * NOTE(review): parts of the case labels were lost in extraction; the
 * structure below follows the visible command/state transitions — confirm
 * against the upstream file.
 */
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			/* A connection being scanned for has no outstanding
			 * Create Connection to cancel.
			 */
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			/* Create Connection Cancel only exists since 1.2 */
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
1346 static void abort_conn_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1349 BT_DBG("Failed to abort connection: status 0x%2.2x", status
);
/* Build and run an HCI request that aborts @conn with @reason.
 *
 * -ENODATA from hci_req_run() means no command was queued (nothing to
 * abort in the current state) and is not treated as an error.
 *
 * Returns 0 on success or a negative errno from running the request.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
1370 static int update_bg_scan(struct hci_request
*req
, unsigned long opt
)
1372 hci_dev_lock(req
->hdev
);
1373 __hci_update_background_scan(req
);
1374 hci_dev_unlock(req
->hdev
);
/* Deferred-work handler: run the background-scan update request and, on
 * failure, fail any LE connection attempt that was waiting on it so its
 * initiator gets notified instead of hanging.
 */
static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
/* hci_req_sync() callback: queue the command(s) that disable LE scanning. */
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);

	return 0;
}
/* hci_req_sync() callback: flush the inquiry cache and start a BR/EDR
 * inquiry with the General Inquiry Access Code.
 *
 * NOTE(review): the inquiry length handling was lost in extraction;
 * @opt is presumably the inquiry duration converted into cp.length —
 * confirm against the upstream file.
 */
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt / 1000;	/* NOTE(review): msec -> units, verify */
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
/* Deferred-work handler that stops a timed LE scan and decides what the
 * discovery state machine should do next: stop discovery for LE-only
 * scans, hand over to (or start) BR/EDR inquiry for interleaved
 * discovery, or leave state alone when inquiry is still running.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* The restart job would re-enable the scan we are about to stop */
	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
/* hci_req_sync() callback: restart LE scanning by disabling and then
 * re-enabling it, with duplicate filtering on. Used to flush the
 * controller's duplicate filter on hardware with the strict-filter quirk.
 */
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}
/* Deferred-work handler: restart the LE scan and re-queue the scan-disable
 * job with the remaining portion of the original scan duration, so that a
 * restarted scan still terminates at the originally scheduled time.
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* Handle jiffies counter wrap-around */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
/* Queue an LE Set Advertise Enable command that turns advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* hci_req_sync() callback: start an active LE scan for discovery.
 *
 * @opt carries the scan interval. Advertising is stopped first (unless a
 * directed-advertising connection attempt is in flight, in which case the
 * scan is refused), any running background scan is disabled so the
 * discovery parameters can be set, and a suitable own address type is
 * picked before the scan parameters and enable commands are queued.
 */
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}
1622 static int interleaved_discov(struct hci_request
*req
, unsigned long opt
)
1626 BT_DBG("%s", req
->hdev
->name
);
1628 err
= active_scan(req
, opt
);
1632 return bredr_inquiry(req
, DISCOV_BREDR_INQUIRY_LEN
);
/* Kick off device discovery of the configured type (BR/EDR-only,
 * interleaved, or LE-only), writing the resulting HCI status to @status.
 * For scan-based types, remember the scan start/duration when the
 * controller has a strict duplicate filter (needed by the restart logic)
 * and schedule the scan-disable job after the chosen timeout.
 */
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time sine BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
/* Queue the commands needed to stop whatever discovery activity is in
 * progress: cancel an active inquiry, disable LE scanning (active or
 * passive), and cancel an outstanding remote-name request when name
 * resolution is underway.
 *
 * Returns true if at least one command was added to @req, false if there
 * was nothing to stop.
 */
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
1751 static int stop_discovery(struct hci_request
*req
, unsigned long opt
)
1753 hci_dev_lock(req
->hdev
);
1754 hci_req_stop_discovery(req
);
1755 hci_dev_unlock(req
->hdev
);
/* Deferred-work handler driving the discovery state machine: start
 * discovery when in DISCOVERY_STARTING (moving to FINDING on success or
 * back to STOPPED on failure), stop it when in DISCOVERY_STOPPING, and do
 * nothing otherwise. mgmt is notified of the outcome in both cases.
 */
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
/* Initialize the work items and delayed work used by the HCI request
 * machinery for @hdev. Called once during controller setup.
 */
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
/* Cancel any pending synchronous request and flush all request-related
 * work items for @hdev, including the advertising-instance timeout if one
 * is armed. Called on controller teardown.
 */
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}