2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
41 static const u16 mgmt_commands
[] = {
42 MGMT_OP_READ_INDEX_LIST
,
45 MGMT_OP_SET_DISCOVERABLE
,
46 MGMT_OP_SET_CONNECTABLE
,
47 MGMT_OP_SET_FAST_CONNECTABLE
,
49 MGMT_OP_SET_LINK_SECURITY
,
53 MGMT_OP_SET_DEV_CLASS
,
54 MGMT_OP_SET_LOCAL_NAME
,
57 MGMT_OP_LOAD_LINK_KEYS
,
58 MGMT_OP_LOAD_LONG_TERM_KEYS
,
60 MGMT_OP_GET_CONNECTIONS
,
61 MGMT_OP_PIN_CODE_REPLY
,
62 MGMT_OP_PIN_CODE_NEG_REPLY
,
63 MGMT_OP_SET_IO_CAPABILITY
,
65 MGMT_OP_CANCEL_PAIR_DEVICE
,
66 MGMT_OP_UNPAIR_DEVICE
,
67 MGMT_OP_USER_CONFIRM_REPLY
,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
69 MGMT_OP_USER_PASSKEY_REPLY
,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
71 MGMT_OP_READ_LOCAL_OOB_DATA
,
72 MGMT_OP_ADD_REMOTE_OOB_DATA
,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
74 MGMT_OP_START_DISCOVERY
,
75 MGMT_OP_STOP_DISCOVERY
,
78 MGMT_OP_UNBLOCK_DEVICE
,
79 MGMT_OP_SET_DEVICE_ID
,
80 MGMT_OP_SET_ADVERTISING
,
82 MGMT_OP_SET_STATIC_ADDRESS
,
83 MGMT_OP_SET_SCAN_PARAMS
,
84 MGMT_OP_SET_SECURE_CONN
,
85 MGMT_OP_SET_DEBUG_KEYS
,
88 MGMT_OP_GET_CONN_INFO
,
89 MGMT_OP_GET_CLOCK_INFO
,
91 MGMT_OP_REMOVE_DEVICE
,
92 MGMT_OP_LOAD_CONN_PARAM
,
93 MGMT_OP_READ_UNCONF_INDEX_LIST
,
94 MGMT_OP_READ_CONFIG_INFO
,
95 MGMT_OP_SET_EXTERNAL_CONFIG
,
96 MGMT_OP_SET_PUBLIC_ADDRESS
,
97 MGMT_OP_START_SERVICE_DISCOVERY
,
100 static const u16 mgmt_events
[] = {
101 MGMT_EV_CONTROLLER_ERROR
,
103 MGMT_EV_INDEX_REMOVED
,
104 MGMT_EV_NEW_SETTINGS
,
105 MGMT_EV_CLASS_OF_DEV_CHANGED
,
106 MGMT_EV_LOCAL_NAME_CHANGED
,
107 MGMT_EV_NEW_LINK_KEY
,
108 MGMT_EV_NEW_LONG_TERM_KEY
,
109 MGMT_EV_DEVICE_CONNECTED
,
110 MGMT_EV_DEVICE_DISCONNECTED
,
111 MGMT_EV_CONNECT_FAILED
,
112 MGMT_EV_PIN_CODE_REQUEST
,
113 MGMT_EV_USER_CONFIRM_REQUEST
,
114 MGMT_EV_USER_PASSKEY_REQUEST
,
116 MGMT_EV_DEVICE_FOUND
,
118 MGMT_EV_DEVICE_BLOCKED
,
119 MGMT_EV_DEVICE_UNBLOCKED
,
120 MGMT_EV_DEVICE_UNPAIRED
,
121 MGMT_EV_PASSKEY_NOTIFY
,
124 MGMT_EV_DEVICE_ADDED
,
125 MGMT_EV_DEVICE_REMOVED
,
126 MGMT_EV_NEW_CONN_PARAM
,
127 MGMT_EV_UNCONF_INDEX_ADDED
,
128 MGMT_EV_UNCONF_INDEX_REMOVED
,
129 MGMT_EV_NEW_CONFIG_OPTIONS
,
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 struct list_head list
;
142 void (*cmd_complete
)(struct pending_cmd
*cmd
, u8 status
);
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table
[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
150 MGMT_STATUS_FAILED
, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
155 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY
, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED
, /* Rejected Security */
162 MGMT_STATUS_REJECTED
, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
170 MGMT_STATUS_BUSY
, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED
, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED
, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED
, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED
, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY
, /* Role Switch Pending */
196 MGMT_STATUS_FAILED
, /* Slot Violation */
197 MGMT_STATUS_FAILED
, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY
, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
210 static u8
mgmt_status(u8 hci_status
)
212 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
213 return mgmt_status_table
[hci_status
];
215 return MGMT_STATUS_FAILED
;
218 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
219 struct sock
*skip_sk
)
222 struct mgmt_hdr
*hdr
;
224 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
228 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
229 hdr
->opcode
= cpu_to_le16(event
);
231 hdr
->index
= cpu_to_le16(hdev
->id
);
233 hdr
->index
= cpu_to_le16(MGMT_INDEX_NONE
);
234 hdr
->len
= cpu_to_le16(data_len
);
237 memcpy(skb_put(skb
, data_len
), data
, data_len
);
240 __net_timestamp(skb
);
242 hci_send_to_control(skb
, skip_sk
);
248 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
251 struct mgmt_hdr
*hdr
;
252 struct mgmt_ev_cmd_status
*ev
;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
257 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
261 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
263 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_STATUS
);
264 hdr
->index
= cpu_to_le16(index
);
265 hdr
->len
= cpu_to_le16(sizeof(*ev
));
267 ev
= (void *) skb_put(skb
, sizeof(*ev
));
269 ev
->opcode
= cpu_to_le16(cmd
);
271 err
= sock_queue_rcv_skb(sk
, skb
);
278 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
279 void *rp
, size_t rp_len
)
282 struct mgmt_hdr
*hdr
;
283 struct mgmt_ev_cmd_complete
*ev
;
286 BT_DBG("sock %p", sk
);
288 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
292 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
294 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
295 hdr
->index
= cpu_to_le16(index
);
296 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
298 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
299 ev
->opcode
= cpu_to_le16(cmd
);
303 memcpy(ev
->data
, rp
, rp_len
);
305 err
= sock_queue_rcv_skb(sk
, skb
);
312 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
315 struct mgmt_rp_read_version rp
;
317 BT_DBG("sock %p", sk
);
319 rp
.version
= MGMT_VERSION
;
320 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
322 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
326 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
329 struct mgmt_rp_read_commands
*rp
;
330 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
331 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
336 BT_DBG("sock %p", sk
);
338 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
340 rp
= kmalloc(rp_size
, GFP_KERNEL
);
344 rp
->num_commands
= cpu_to_le16(num_commands
);
345 rp
->num_events
= cpu_to_le16(num_events
);
347 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
348 put_unaligned_le16(mgmt_commands
[i
], opcode
);
350 for (i
= 0; i
< num_events
; i
++, opcode
++)
351 put_unaligned_le16(mgmt_events
[i
], opcode
);
353 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
360 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
363 struct mgmt_rp_read_index_list
*rp
;
369 BT_DBG("sock %p", sk
);
371 read_lock(&hci_dev_list_lock
);
374 list_for_each_entry(d
, &hci_dev_list
, list
) {
375 if (d
->dev_type
== HCI_BREDR
&&
376 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
380 rp_len
= sizeof(*rp
) + (2 * count
);
381 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
383 read_unlock(&hci_dev_list_lock
);
388 list_for_each_entry(d
, &hci_dev_list
, list
) {
389 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
390 test_bit(HCI_CONFIG
, &d
->dev_flags
) ||
391 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
400 if (d
->dev_type
== HCI_BREDR
&&
401 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
402 rp
->index
[count
++] = cpu_to_le16(d
->id
);
403 BT_DBG("Added hci%u", d
->id
);
407 rp
->num_controllers
= cpu_to_le16(count
);
408 rp_len
= sizeof(*rp
) + (2 * count
);
410 read_unlock(&hci_dev_list_lock
);
412 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
420 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
421 void *data
, u16 data_len
)
423 struct mgmt_rp_read_unconf_index_list
*rp
;
429 BT_DBG("sock %p", sk
);
431 read_lock(&hci_dev_list_lock
);
434 list_for_each_entry(d
, &hci_dev_list
, list
) {
435 if (d
->dev_type
== HCI_BREDR
&&
436 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
440 rp_len
= sizeof(*rp
) + (2 * count
);
441 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
443 read_unlock(&hci_dev_list_lock
);
448 list_for_each_entry(d
, &hci_dev_list
, list
) {
449 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
450 test_bit(HCI_CONFIG
, &d
->dev_flags
) ||
451 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
460 if (d
->dev_type
== HCI_BREDR
&&
461 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
462 rp
->index
[count
++] = cpu_to_le16(d
->id
);
463 BT_DBG("Added hci%u", d
->id
);
467 rp
->num_controllers
= cpu_to_le16(count
);
468 rp_len
= sizeof(*rp
) + (2 * count
);
470 read_unlock(&hci_dev_list_lock
);
472 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_UNCONF_INDEX_LIST
,
480 static bool is_configured(struct hci_dev
*hdev
)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
483 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
487 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
493 static __le32
get_missing_options(struct hci_dev
*hdev
)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
498 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
499 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
502 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
503 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
505 return cpu_to_le32(options
);
508 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
510 __le32 options
= get_missing_options(hdev
);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
513 sizeof(options
), skip
);
516 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
518 __le32 options
= get_missing_options(hdev
);
520 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
524 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
525 void *data
, u16 data_len
)
527 struct mgmt_rp_read_config_info rp
;
530 BT_DBG("sock %p %s", sk
, hdev
->name
);
534 memset(&rp
, 0, sizeof(rp
));
535 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
538 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
540 if (hdev
->set_bdaddr
)
541 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
543 rp
.supported_options
= cpu_to_le32(options
);
544 rp
.missing_options
= get_missing_options(hdev
);
546 hci_dev_unlock(hdev
);
548 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0, &rp
,
552 static u32
get_supported_settings(struct hci_dev
*hdev
)
556 settings
|= MGMT_SETTING_POWERED
;
557 settings
|= MGMT_SETTING_BONDABLE
;
558 settings
|= MGMT_SETTING_DEBUG_KEYS
;
559 settings
|= MGMT_SETTING_CONNECTABLE
;
560 settings
|= MGMT_SETTING_DISCOVERABLE
;
562 if (lmp_bredr_capable(hdev
)) {
563 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
564 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
565 settings
|= MGMT_SETTING_BREDR
;
566 settings
|= MGMT_SETTING_LINK_SECURITY
;
568 if (lmp_ssp_capable(hdev
)) {
569 settings
|= MGMT_SETTING_SSP
;
570 settings
|= MGMT_SETTING_HS
;
573 if (lmp_sc_capable(hdev
) ||
574 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
575 settings
|= MGMT_SETTING_SECURE_CONN
;
578 if (lmp_le_capable(hdev
)) {
579 settings
|= MGMT_SETTING_LE
;
580 settings
|= MGMT_SETTING_ADVERTISING
;
581 settings
|= MGMT_SETTING_SECURE_CONN
;
582 settings
|= MGMT_SETTING_PRIVACY
;
585 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
587 settings
|= MGMT_SETTING_CONFIGURATION
;
592 static u32
get_current_settings(struct hci_dev
*hdev
)
596 if (hdev_is_powered(hdev
))
597 settings
|= MGMT_SETTING_POWERED
;
599 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
600 settings
|= MGMT_SETTING_CONNECTABLE
;
602 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
603 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
605 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
606 settings
|= MGMT_SETTING_DISCOVERABLE
;
608 if (test_bit(HCI_BONDABLE
, &hdev
->dev_flags
))
609 settings
|= MGMT_SETTING_BONDABLE
;
611 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
612 settings
|= MGMT_SETTING_BREDR
;
614 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
615 settings
|= MGMT_SETTING_LE
;
617 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
618 settings
|= MGMT_SETTING_LINK_SECURITY
;
620 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
621 settings
|= MGMT_SETTING_SSP
;
623 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
624 settings
|= MGMT_SETTING_HS
;
626 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
627 settings
|= MGMT_SETTING_ADVERTISING
;
629 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
630 settings
|= MGMT_SETTING_SECURE_CONN
;
632 if (test_bit(HCI_KEEP_DEBUG_KEYS
, &hdev
->dev_flags
))
633 settings
|= MGMT_SETTING_DEBUG_KEYS
;
635 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
636 settings
|= MGMT_SETTING_PRIVACY
;
641 #define PNP_INFO_SVCLASS_ID 0x1200
643 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
645 u8
*ptr
= data
, *uuids_start
= NULL
;
646 struct bt_uuid
*uuid
;
651 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
654 if (uuid
->size
!= 16)
657 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
661 if (uuid16
== PNP_INFO_SVCLASS_ID
)
667 uuids_start
[1] = EIR_UUID16_ALL
;
671 /* Stop if not enough space to put next UUID */
672 if ((ptr
- data
) + sizeof(u16
) > len
) {
673 uuids_start
[1] = EIR_UUID16_SOME
;
677 *ptr
++ = (uuid16
& 0x00ff);
678 *ptr
++ = (uuid16
& 0xff00) >> 8;
679 uuids_start
[0] += sizeof(uuid16
);
685 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
687 u8
*ptr
= data
, *uuids_start
= NULL
;
688 struct bt_uuid
*uuid
;
693 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
694 if (uuid
->size
!= 32)
700 uuids_start
[1] = EIR_UUID32_ALL
;
704 /* Stop if not enough space to put next UUID */
705 if ((ptr
- data
) + sizeof(u32
) > len
) {
706 uuids_start
[1] = EIR_UUID32_SOME
;
710 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
712 uuids_start
[0] += sizeof(u32
);
718 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
720 u8
*ptr
= data
, *uuids_start
= NULL
;
721 struct bt_uuid
*uuid
;
726 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
727 if (uuid
->size
!= 128)
733 uuids_start
[1] = EIR_UUID128_ALL
;
737 /* Stop if not enough space to put next UUID */
738 if ((ptr
- data
) + 16 > len
) {
739 uuids_start
[1] = EIR_UUID128_SOME
;
743 memcpy(ptr
, uuid
->uuid
, 16);
745 uuids_start
[0] += 16;
751 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
753 struct pending_cmd
*cmd
;
755 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
756 if (cmd
->opcode
== opcode
)
763 static struct pending_cmd
*mgmt_pending_find_data(u16 opcode
,
764 struct hci_dev
*hdev
,
767 struct pending_cmd
*cmd
;
769 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
770 if (cmd
->user_data
!= data
)
772 if (cmd
->opcode
== opcode
)
779 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
784 name_len
= strlen(hdev
->dev_name
);
786 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
788 if (name_len
> max_len
) {
790 ptr
[1] = EIR_NAME_SHORT
;
792 ptr
[1] = EIR_NAME_COMPLETE
;
794 ptr
[0] = name_len
+ 1;
796 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
798 ad_len
+= (name_len
+ 2);
799 ptr
+= (name_len
+ 2);
805 static void update_scan_rsp_data(struct hci_request
*req
)
807 struct hci_dev
*hdev
= req
->hdev
;
808 struct hci_cp_le_set_scan_rsp_data cp
;
811 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
814 memset(&cp
, 0, sizeof(cp
));
816 len
= create_scan_rsp_data(hdev
, cp
.data
);
818 if (hdev
->scan_rsp_data_len
== len
&&
819 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
822 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
823 hdev
->scan_rsp_data_len
= len
;
827 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
830 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
832 struct pending_cmd
*cmd
;
834 /* If there's a pending mgmt command the flags will not yet have
835 * their final values, so check for this first.
837 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
839 struct mgmt_mode
*cp
= cmd
->param
;
841 return LE_AD_GENERAL
;
842 else if (cp
->val
== 0x02)
843 return LE_AD_LIMITED
;
845 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
846 return LE_AD_LIMITED
;
847 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
848 return LE_AD_GENERAL
;
854 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
856 u8 ad_len
= 0, flags
= 0;
858 flags
|= get_adv_discov_flags(hdev
);
860 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
861 flags
|= LE_AD_NO_BREDR
;
864 BT_DBG("adv flags 0x%02x", flags
);
874 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
876 ptr
[1] = EIR_TX_POWER
;
877 ptr
[2] = (u8
) hdev
->adv_tx_power
;
886 static void update_adv_data(struct hci_request
*req
)
888 struct hci_dev
*hdev
= req
->hdev
;
889 struct hci_cp_le_set_adv_data cp
;
892 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
895 memset(&cp
, 0, sizeof(cp
));
897 len
= create_adv_data(hdev
, cp
.data
);
899 if (hdev
->adv_data_len
== len
&&
900 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
903 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
904 hdev
->adv_data_len
= len
;
908 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
911 int mgmt_update_adv_data(struct hci_dev
*hdev
)
913 struct hci_request req
;
915 hci_req_init(&req
, hdev
);
916 update_adv_data(&req
);
918 return hci_req_run(&req
, NULL
);
921 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
926 name_len
= strlen(hdev
->dev_name
);
932 ptr
[1] = EIR_NAME_SHORT
;
934 ptr
[1] = EIR_NAME_COMPLETE
;
936 /* EIR Data length */
937 ptr
[0] = name_len
+ 1;
939 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
941 ptr
+= (name_len
+ 2);
944 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
946 ptr
[1] = EIR_TX_POWER
;
947 ptr
[2] = (u8
) hdev
->inq_tx_power
;
952 if (hdev
->devid_source
> 0) {
954 ptr
[1] = EIR_DEVICE_ID
;
956 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
957 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
958 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
959 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
964 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
965 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
966 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
969 static void update_eir(struct hci_request
*req
)
971 struct hci_dev
*hdev
= req
->hdev
;
972 struct hci_cp_write_eir cp
;
974 if (!hdev_is_powered(hdev
))
977 if (!lmp_ext_inq_capable(hdev
))
980 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
983 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
986 memset(&cp
, 0, sizeof(cp
));
988 create_eir(hdev
, cp
.data
);
990 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
993 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
995 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
998 static u8
get_service_classes(struct hci_dev
*hdev
)
1000 struct bt_uuid
*uuid
;
1003 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
1004 val
|= uuid
->svc_hint
;
1009 static void update_class(struct hci_request
*req
)
1011 struct hci_dev
*hdev
= req
->hdev
;
1014 BT_DBG("%s", hdev
->name
);
1016 if (!hdev_is_powered(hdev
))
1019 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1022 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1025 cod
[0] = hdev
->minor_class
;
1026 cod
[1] = hdev
->major_class
;
1027 cod
[2] = get_service_classes(hdev
);
1029 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
1032 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
1035 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
1038 static bool get_connectable(struct hci_dev
*hdev
)
1040 struct pending_cmd
*cmd
;
1042 /* If there's a pending mgmt command the flag will not yet have
1043 * it's final value, so check for this first.
1045 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1047 struct mgmt_mode
*cp
= cmd
->param
;
1051 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1054 static void disable_advertising(struct hci_request
*req
)
1058 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1061 static void enable_advertising(struct hci_request
*req
)
1063 struct hci_dev
*hdev
= req
->hdev
;
1064 struct hci_cp_le_set_adv_param cp
;
1065 u8 own_addr_type
, enable
= 0x01;
1068 if (hci_conn_num(hdev
, LE_LINK
) > 0)
1071 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
1072 disable_advertising(req
);
1074 /* Clear the HCI_LE_ADV bit temporarily so that the
1075 * hci_update_random_address knows that it's safe to go ahead
1076 * and write a new random address. The flag will be set back on
1077 * as soon as the SET_ADV_ENABLE HCI command completes.
1079 clear_bit(HCI_LE_ADV
, &hdev
->dev_flags
);
1081 connectable
= get_connectable(hdev
);
1083 /* Set require_privacy to true only when non-connectable
1084 * advertising is used. In that case it is fine to use a
1085 * non-resolvable private address.
1087 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
1090 memset(&cp
, 0, sizeof(cp
));
1091 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
1092 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
1093 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
1094 cp
.own_address_type
= own_addr_type
;
1095 cp
.channel_map
= hdev
->le_adv_channel_map
;
1097 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
1099 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1102 static void service_cache_off(struct work_struct
*work
)
1104 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1105 service_cache
.work
);
1106 struct hci_request req
;
1108 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1111 hci_req_init(&req
, hdev
);
1118 hci_dev_unlock(hdev
);
1120 hci_req_run(&req
, NULL
);
1123 static void rpa_expired(struct work_struct
*work
)
1125 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1127 struct hci_request req
;
1131 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
1133 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1136 /* The generation of a new RPA and programming it into the
1137 * controller happens in the enable_advertising() function.
1139 hci_req_init(&req
, hdev
);
1140 enable_advertising(&req
);
1141 hci_req_run(&req
, NULL
);
1144 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1146 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
1149 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1150 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1152 /* Non-mgmt controlled devices get this bit set
1153 * implicitly so that pairing works for them, however
1154 * for mgmt we require user-space to explicitly enable
1157 clear_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1160 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1161 void *data
, u16 data_len
)
1163 struct mgmt_rp_read_info rp
;
1165 BT_DBG("sock %p %s", sk
, hdev
->name
);
1169 memset(&rp
, 0, sizeof(rp
));
1171 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1173 rp
.version
= hdev
->hci_ver
;
1174 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1176 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1177 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1179 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1181 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1182 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1184 hci_dev_unlock(hdev
);
1186 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1190 static void mgmt_pending_free(struct pending_cmd
*cmd
)
1197 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
1198 struct hci_dev
*hdev
, void *data
,
1201 struct pending_cmd
*cmd
;
1203 cmd
= kzalloc(sizeof(*cmd
), GFP_KERNEL
);
1207 cmd
->opcode
= opcode
;
1208 cmd
->index
= hdev
->id
;
1210 cmd
->param
= kmemdup(data
, len
, GFP_KERNEL
);
1216 cmd
->param_len
= len
;
1221 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1226 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1227 void (*cb
)(struct pending_cmd
*cmd
,
1231 struct pending_cmd
*cmd
, *tmp
;
1233 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1234 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1241 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1243 list_del(&cmd
->list
);
1244 mgmt_pending_free(cmd
);
1247 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1249 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1251 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1255 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1257 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1259 if (hci_conn_count(hdev
) == 0) {
1260 cancel_delayed_work(&hdev
->power_off
);
1261 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1265 static bool hci_stop_discovery(struct hci_request
*req
)
1267 struct hci_dev
*hdev
= req
->hdev
;
1268 struct hci_cp_remote_name_req_cancel cp
;
1269 struct inquiry_entry
*e
;
1271 switch (hdev
->discovery
.state
) {
1272 case DISCOVERY_FINDING
:
1273 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
1274 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1276 cancel_delayed_work(&hdev
->le_scan_disable
);
1277 hci_req_add_le_scan_disable(req
);
1282 case DISCOVERY_RESOLVING
:
1283 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1288 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1289 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1295 /* Passive scanning */
1296 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
)) {
1297 hci_req_add_le_scan_disable(req
);
1307 static int clean_up_hci_state(struct hci_dev
*hdev
)
1309 struct hci_request req
;
1310 struct hci_conn
*conn
;
1311 bool discov_stopped
;
1314 hci_req_init(&req
, hdev
);
1316 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1317 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1319 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1322 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
1323 disable_advertising(&req
);
1325 discov_stopped
= hci_stop_discovery(&req
);
1327 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1328 struct hci_cp_disconnect dc
;
1329 struct hci_cp_reject_conn_req rej
;
1331 switch (conn
->state
) {
1334 dc
.handle
= cpu_to_le16(conn
->handle
);
1335 dc
.reason
= 0x15; /* Terminated due to Power Off */
1336 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1339 if (conn
->type
== LE_LINK
)
1340 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1342 else if (conn
->type
== ACL_LINK
)
1343 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1347 bacpy(&rej
.bdaddr
, &conn
->dst
);
1348 rej
.reason
= 0x15; /* Terminated due to Power Off */
1349 if (conn
->type
== ACL_LINK
)
1350 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1352 else if (conn
->type
== SCO_LINK
)
1353 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1359 err
= hci_req_run(&req
, clean_up_hci_complete
);
1360 if (!err
&& discov_stopped
)
1361 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1366 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1369 struct mgmt_mode
*cp
= data
;
1370 struct pending_cmd
*cmd
;
1373 BT_DBG("request for %s", hdev
->name
);
1375 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1376 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1377 MGMT_STATUS_INVALID_PARAMS
);
1381 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1382 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1387 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1388 cancel_delayed_work(&hdev
->power_off
);
1391 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1393 err
= mgmt_powered(hdev
, 1);
1398 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1399 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1403 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1410 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1413 /* Disconnect connections, stop scans, etc */
1414 err
= clean_up_hci_state(hdev
);
1416 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1417 HCI_POWER_OFF_TIMEOUT
);
1419 /* ENODATA means there were no HCI commands queued */
1420 if (err
== -ENODATA
) {
1421 cancel_delayed_work(&hdev
->power_off
);
1422 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1428 hci_dev_unlock(hdev
);
1432 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1436 ev
= cpu_to_le32(get_current_settings(hdev
));
1438 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1441 int mgmt_new_settings(struct hci_dev
*hdev
)
1443 return new_settings(hdev
, NULL
);
1448 struct hci_dev
*hdev
;
1452 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1454 struct cmd_lookup
*match
= data
;
1456 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1458 list_del(&cmd
->list
);
1460 if (match
->sk
== NULL
) {
1461 match
->sk
= cmd
->sk
;
1462 sock_hold(match
->sk
);
1465 mgmt_pending_free(cmd
);
1468 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1472 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1473 mgmt_pending_remove(cmd
);
1476 static void cmd_complete_rsp(struct pending_cmd
*cmd
, void *data
)
1478 if (cmd
->cmd_complete
) {
1481 cmd
->cmd_complete(cmd
, *status
);
1482 mgmt_pending_remove(cmd
);
1487 cmd_status_rsp(cmd
, data
);
1490 static void generic_cmd_complete(struct pending_cmd
*cmd
, u8 status
)
1492 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, cmd
->param
,
1496 static void addr_cmd_complete(struct pending_cmd
*cmd
, u8 status
)
1498 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, cmd
->param
,
1499 sizeof(struct mgmt_addr_info
));
1502 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1504 if (!lmp_bredr_capable(hdev
))
1505 return MGMT_STATUS_NOT_SUPPORTED
;
1506 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1507 return MGMT_STATUS_REJECTED
;
1509 return MGMT_STATUS_SUCCESS
;
1512 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1514 if (!lmp_le_capable(hdev
))
1515 return MGMT_STATUS_NOT_SUPPORTED
;
1516 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1517 return MGMT_STATUS_REJECTED
;
1519 return MGMT_STATUS_SUCCESS
;
1522 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1524 struct pending_cmd
*cmd
;
1525 struct mgmt_mode
*cp
;
1526 struct hci_request req
;
1529 BT_DBG("status 0x%02x", status
);
1533 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1538 u8 mgmt_err
= mgmt_status(status
);
1539 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1546 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1549 if (hdev
->discov_timeout
> 0) {
1550 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1551 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1555 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1559 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1562 new_settings(hdev
, cmd
->sk
);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req
, hdev
);
1570 __hci_update_page_scan(&req
);
1572 hci_req_run(&req
, NULL
);
1575 mgmt_pending_remove(cmd
);
1578 hci_dev_unlock(hdev
);
1581 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1584 struct mgmt_cp_set_discoverable
*cp
= data
;
1585 struct pending_cmd
*cmd
;
1586 struct hci_request req
;
1591 BT_DBG("request for %s", hdev
->name
);
1593 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1594 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1595 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1596 MGMT_STATUS_REJECTED
);
1598 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1599 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1600 MGMT_STATUS_INVALID_PARAMS
);
1602 timeout
= __le16_to_cpu(cp
->timeout
);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp
->val
== 0x00 && timeout
> 0) ||
1608 (cp
->val
== 0x02 && timeout
== 0))
1609 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1610 MGMT_STATUS_INVALID_PARAMS
);
1614 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1615 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1616 MGMT_STATUS_NOT_POWERED
);
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1622 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1627 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1628 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1629 MGMT_STATUS_REJECTED
);
1633 if (!hdev_is_powered(hdev
)) {
1634 bool changed
= false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1641 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1645 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1650 err
= new_settings(hdev
, sk
);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1660 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1661 &hdev
->dev_flags
)) {
1662 cancel_delayed_work(&hdev
->discov_off
);
1663 hdev
->discov_timeout
= timeout
;
1665 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1666 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1667 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1671 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1675 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev
->discov_off
);
1686 hdev
->discov_timeout
= timeout
;
1688 /* Limited discoverable mode */
1689 if (cp
->val
== 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1694 hci_req_init(&req
, hdev
);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1705 struct hci_cp_write_current_iac_lap hci_cp
;
1707 if (cp
->val
== 0x02) {
1708 /* Limited discoverable mode */
1709 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1710 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1711 hci_cp
.iac_lap
[1] = 0x8b;
1712 hci_cp
.iac_lap
[2] = 0x9e;
1713 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1714 hci_cp
.iac_lap
[4] = 0x8b;
1715 hci_cp
.iac_lap
[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1720 hci_cp
.iac_lap
[1] = 0x8b;
1721 hci_cp
.iac_lap
[2] = 0x9e;
1724 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1725 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1727 scan
|= SCAN_INQUIRY
;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1732 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1735 update_adv_data(&req
);
1737 err
= hci_req_run(&req
, set_discoverable_complete
);
1739 mgmt_pending_remove(cmd
);
1742 hci_dev_unlock(hdev
);
1746 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1748 struct hci_dev
*hdev
= req
->hdev
;
1749 struct hci_cp_write_page_scan_activity acp
;
1752 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1755 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1759 type
= PAGE_SCAN_TYPE_INTERLACED
;
1761 /* 160 msec page scan interval */
1762 acp
.interval
= cpu_to_le16(0x0100);
1764 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1766 /* default 1.28 sec page scan */
1767 acp
.interval
= cpu_to_le16(0x0800);
1770 acp
.window
= cpu_to_le16(0x0012);
1772 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1773 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1774 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1777 if (hdev
->page_scan_type
!= type
)
1778 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1781 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1783 struct pending_cmd
*cmd
;
1784 struct mgmt_mode
*cp
;
1785 bool conn_changed
, discov_changed
;
1787 BT_DBG("status 0x%02x", status
);
1791 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1796 u8 mgmt_err
= mgmt_status(status
);
1797 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1803 conn_changed
= !test_and_set_bit(HCI_CONNECTABLE
,
1805 discov_changed
= false;
1807 conn_changed
= test_and_clear_bit(HCI_CONNECTABLE
,
1809 discov_changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1813 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1815 if (conn_changed
|| discov_changed
) {
1816 new_settings(hdev
, cmd
->sk
);
1817 hci_update_page_scan(hdev
);
1819 mgmt_update_adv_data(hdev
);
1820 hci_update_background_scan(hdev
);
1824 mgmt_pending_remove(cmd
);
1827 hci_dev_unlock(hdev
);
1830 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1831 struct sock
*sk
, u8 val
)
1833 bool changed
= false;
1836 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1840 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1842 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1843 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1846 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1851 hci_update_page_scan(hdev
);
1852 hci_update_background_scan(hdev
);
1853 return new_settings(hdev
, sk
);
1859 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1862 struct mgmt_mode
*cp
= data
;
1863 struct pending_cmd
*cmd
;
1864 struct hci_request req
;
1868 BT_DBG("request for %s", hdev
->name
);
1870 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1871 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1872 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1873 MGMT_STATUS_REJECTED
);
1875 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1876 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1877 MGMT_STATUS_INVALID_PARAMS
);
1881 if (!hdev_is_powered(hdev
)) {
1882 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1886 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1887 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1888 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1893 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1899 hci_req_init(&req
, hdev
);
1901 /* If BR/EDR is not enabled and we disable advertising as a
1902 * by-product of disabling connectable, we need to update the
1903 * advertising flags.
1905 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1907 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1908 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1910 update_adv_data(&req
);
1911 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1915 /* If we don't have any whitelist entries just
1916 * disable all scanning. If there are entries
1917 * and we had both page and inquiry scanning
1918 * enabled then fall back to only page scanning.
1919 * Otherwise no changes are needed.
1921 if (list_empty(&hdev
->whitelist
))
1922 scan
= SCAN_DISABLED
;
1923 else if (test_bit(HCI_ISCAN
, &hdev
->flags
))
1926 goto no_scan_update
;
1928 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1929 hdev
->discov_timeout
> 0)
1930 cancel_delayed_work(&hdev
->discov_off
);
1933 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1937 /* If we're going from non-connectable to connectable or
1938 * vice-versa when fast connectable is enabled ensure that fast
1939 * connectable gets disabled. write_fast_connectable won't do
1940 * anything if the page scan parameters are already what they
1943 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1944 write_fast_connectable(&req
, false);
1946 /* Update the advertising parameters if necessary */
1947 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1948 enable_advertising(&req
);
1950 err
= hci_req_run(&req
, set_connectable_complete
);
1952 mgmt_pending_remove(cmd
);
1953 if (err
== -ENODATA
)
1954 err
= set_connectable_update_settings(hdev
, sk
,
1960 hci_dev_unlock(hdev
);
1964 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1967 struct mgmt_mode
*cp
= data
;
1971 BT_DBG("request for %s", hdev
->name
);
1973 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1974 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1975 MGMT_STATUS_INVALID_PARAMS
);
1980 changed
= !test_and_set_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1982 changed
= test_and_clear_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1984 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1989 err
= new_settings(hdev
, sk
);
1992 hci_dev_unlock(hdev
);
1996 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1999 struct mgmt_mode
*cp
= data
;
2000 struct pending_cmd
*cmd
;
2004 BT_DBG("request for %s", hdev
->name
);
2006 status
= mgmt_bredr_support(hdev
);
2008 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2011 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2012 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2013 MGMT_STATUS_INVALID_PARAMS
);
2017 if (!hdev_is_powered(hdev
)) {
2018 bool changed
= false;
2020 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
2021 &hdev
->dev_flags
)) {
2022 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
2026 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2031 err
= new_settings(hdev
, sk
);
2036 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
2037 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2044 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
2045 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2049 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
2055 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
2057 mgmt_pending_remove(cmd
);
2062 hci_dev_unlock(hdev
);
2066 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2068 struct mgmt_mode
*cp
= data
;
2069 struct pending_cmd
*cmd
;
2073 BT_DBG("request for %s", hdev
->name
);
2075 status
= mgmt_bredr_support(hdev
);
2077 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
2079 if (!lmp_ssp_capable(hdev
))
2080 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2081 MGMT_STATUS_NOT_SUPPORTED
);
2083 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2084 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2085 MGMT_STATUS_INVALID_PARAMS
);
2089 if (!hdev_is_powered(hdev
)) {
2093 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
2096 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
2099 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
2102 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2105 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2110 err
= new_settings(hdev
, sk
);
2115 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
2116 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
2117 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2122 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
2123 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2127 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2133 if (!cp
->val
&& test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
2134 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
2135 sizeof(cp
->val
), &cp
->val
);
2137 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
2139 mgmt_pending_remove(cmd
);
2144 hci_dev_unlock(hdev
);
2148 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2150 struct mgmt_mode
*cp
= data
;
2155 BT_DBG("request for %s", hdev
->name
);
2157 status
= mgmt_bredr_support(hdev
);
2159 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2161 if (!lmp_ssp_capable(hdev
))
2162 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2163 MGMT_STATUS_NOT_SUPPORTED
);
2165 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
2166 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2167 MGMT_STATUS_REJECTED
);
2169 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2170 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2171 MGMT_STATUS_INVALID_PARAMS
);
2176 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2178 if (hdev_is_powered(hdev
)) {
2179 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2180 MGMT_STATUS_REJECTED
);
2184 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2187 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2192 err
= new_settings(hdev
, sk
);
2195 hci_dev_unlock(hdev
);
2199 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
2201 struct cmd_lookup match
= { NULL
, hdev
};
2206 u8 mgmt_err
= mgmt_status(status
);
2208 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2213 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2215 new_settings(hdev
, match
.sk
);
2220 /* Make sure the controller has a good default for
2221 * advertising data. Restrict the update to when LE
2222 * has actually been enabled. During power on, the
2223 * update in powered_update_hci will take care of it.
2225 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2226 struct hci_request req
;
2228 hci_req_init(&req
, hdev
);
2229 update_adv_data(&req
);
2230 update_scan_rsp_data(&req
);
2231 hci_req_run(&req
, NULL
);
2233 hci_update_background_scan(hdev
);
2237 hci_dev_unlock(hdev
);
2240 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2242 struct mgmt_mode
*cp
= data
;
2243 struct hci_cp_write_le_host_supported hci_cp
;
2244 struct pending_cmd
*cmd
;
2245 struct hci_request req
;
2249 BT_DBG("request for %s", hdev
->name
);
2251 if (!lmp_le_capable(hdev
))
2252 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2253 MGMT_STATUS_NOT_SUPPORTED
);
2255 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2256 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2257 MGMT_STATUS_INVALID_PARAMS
);
2259 /* LE-only devices do not allow toggling LE on/off */
2260 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
2261 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2262 MGMT_STATUS_REJECTED
);
2267 enabled
= lmp_host_le_capable(hdev
);
2269 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2270 bool changed
= false;
2272 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2273 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
2277 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
2278 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
2282 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2287 err
= new_settings(hdev
, sk
);
2292 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
2293 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2294 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2299 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2305 hci_req_init(&req
, hdev
);
2307 memset(&hci_cp
, 0, sizeof(hci_cp
));
2311 hci_cp
.simul
= 0x00;
2313 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
2314 disable_advertising(&req
);
2317 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2320 err
= hci_req_run(&req
, le_enable_complete
);
2322 mgmt_pending_remove(cmd
);
2325 hci_dev_unlock(hdev
);
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2335 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2337 struct pending_cmd
*cmd
;
2339 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2340 switch (cmd
->opcode
) {
2341 case MGMT_OP_ADD_UUID
:
2342 case MGMT_OP_REMOVE_UUID
:
2343 case MGMT_OP_SET_DEV_CLASS
:
2344 case MGMT_OP_SET_POWERED
:
2352 static const u8 bluetooth_base_uuid
[] = {
2353 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2354 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2357 static u8
get_uuid_size(const u8
*uuid
)
2361 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2364 val
= get_unaligned_le32(&uuid
[12]);
2371 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2373 struct pending_cmd
*cmd
;
2377 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
2381 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2382 hdev
->dev_class
, 3);
2384 mgmt_pending_remove(cmd
);
2387 hci_dev_unlock(hdev
);
2390 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2392 BT_DBG("status 0x%02x", status
);
2394 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2397 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2399 struct mgmt_cp_add_uuid
*cp
= data
;
2400 struct pending_cmd
*cmd
;
2401 struct hci_request req
;
2402 struct bt_uuid
*uuid
;
2405 BT_DBG("request for %s", hdev
->name
);
2409 if (pending_eir_or_class(hdev
)) {
2410 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2415 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2421 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2422 uuid
->svc_hint
= cp
->svc_hint
;
2423 uuid
->size
= get_uuid_size(cp
->uuid
);
2425 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2427 hci_req_init(&req
, hdev
);
2432 err
= hci_req_run(&req
, add_uuid_complete
);
2434 if (err
!= -ENODATA
)
2437 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2438 hdev
->dev_class
, 3);
2442 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2451 hci_dev_unlock(hdev
);
2455 static bool enable_service_cache(struct hci_dev
*hdev
)
2457 if (!hdev_is_powered(hdev
))
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2461 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2469 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2471 BT_DBG("status 0x%02x", status
);
2473 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2476 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2479 struct mgmt_cp_remove_uuid
*cp
= data
;
2480 struct pending_cmd
*cmd
;
2481 struct bt_uuid
*match
, *tmp
;
2482 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2483 struct hci_request req
;
2486 BT_DBG("request for %s", hdev
->name
);
2490 if (pending_eir_or_class(hdev
)) {
2491 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2496 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2497 hci_uuids_clear(hdev
);
2499 if (enable_service_cache(hdev
)) {
2500 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2501 0, hdev
->dev_class
, 3);
2510 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2511 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2514 list_del(&match
->list
);
2520 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2521 MGMT_STATUS_INVALID_PARAMS
);
2526 hci_req_init(&req
, hdev
);
2531 err
= hci_req_run(&req
, remove_uuid_complete
);
2533 if (err
!= -ENODATA
)
2536 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2537 hdev
->dev_class
, 3);
2541 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2550 hci_dev_unlock(hdev
);
2554 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2556 BT_DBG("status 0x%02x", status
);
2558 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2561 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2564 struct mgmt_cp_set_dev_class
*cp
= data
;
2565 struct pending_cmd
*cmd
;
2566 struct hci_request req
;
2569 BT_DBG("request for %s", hdev
->name
);
2571 if (!lmp_bredr_capable(hdev
))
2572 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2573 MGMT_STATUS_NOT_SUPPORTED
);
2577 if (pending_eir_or_class(hdev
)) {
2578 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2583 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2584 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2585 MGMT_STATUS_INVALID_PARAMS
);
2589 hdev
->major_class
= cp
->major
;
2590 hdev
->minor_class
= cp
->minor
;
2592 if (!hdev_is_powered(hdev
)) {
2593 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2594 hdev
->dev_class
, 3);
2598 hci_req_init(&req
, hdev
);
2600 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2601 hci_dev_unlock(hdev
);
2602 cancel_delayed_work_sync(&hdev
->service_cache
);
2609 err
= hci_req_run(&req
, set_class_complete
);
2611 if (err
!= -ENODATA
)
2614 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2615 hdev
->dev_class
, 3);
2619 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2628 hci_dev_unlock(hdev
);
2632 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2635 struct mgmt_cp_load_link_keys
*cp
= data
;
2636 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2637 sizeof(struct mgmt_link_key_info
));
2638 u16 key_count
, expected_len
;
2642 BT_DBG("request for %s", hdev
->name
);
2644 if (!lmp_bredr_capable(hdev
))
2645 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2646 MGMT_STATUS_NOT_SUPPORTED
);
2648 key_count
= __le16_to_cpu(cp
->key_count
);
2649 if (key_count
> max_key_count
) {
2650 BT_ERR("load_link_keys: too big key_count value %u",
2652 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2653 MGMT_STATUS_INVALID_PARAMS
);
2656 expected_len
= sizeof(*cp
) + key_count
*
2657 sizeof(struct mgmt_link_key_info
);
2658 if (expected_len
!= len
) {
2659 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2661 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2662 MGMT_STATUS_INVALID_PARAMS
);
2665 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2666 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2667 MGMT_STATUS_INVALID_PARAMS
);
2669 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2672 for (i
= 0; i
< key_count
; i
++) {
2673 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2675 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2676 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2677 MGMT_STATUS_INVALID_PARAMS
);
2682 hci_link_keys_clear(hdev
);
2685 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
2688 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
2692 new_settings(hdev
, NULL
);
2694 for (i
= 0; i
< key_count
; i
++) {
2695 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2697 /* Always ignore debug keys and require a new pairing if
2698 * the user wants to use them.
2700 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2703 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2704 key
->type
, key
->pin_len
, NULL
);
2707 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2709 hci_dev_unlock(hdev
);
2714 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2715 u8 addr_type
, struct sock
*skip_sk
)
2717 struct mgmt_ev_device_unpaired ev
;
2719 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2720 ev
.addr
.type
= addr_type
;
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2726 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2729 struct mgmt_cp_unpair_device
*cp
= data
;
2730 struct mgmt_rp_unpair_device rp
;
2731 struct hci_cp_disconnect dc
;
2732 struct pending_cmd
*cmd
;
2733 struct hci_conn
*conn
;
2736 memset(&rp
, 0, sizeof(rp
));
2737 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2738 rp
.addr
.type
= cp
->addr
.type
;
2740 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2741 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2742 MGMT_STATUS_INVALID_PARAMS
,
2745 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2746 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2747 MGMT_STATUS_INVALID_PARAMS
,
2752 if (!hdev_is_powered(hdev
)) {
2753 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2754 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2758 if (cp
->addr
.type
== BDADDR_BREDR
) {
2759 /* If disconnection is requested, then look up the
2760 * connection. If the remote device is connected, it
2761 * will be later used to terminate the link.
2763 * Setting it to NULL explicitly will cause no
2764 * termination of the link.
2767 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2772 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2776 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2779 /* Defer clearing up the connection parameters
2780 * until closing to give a chance of keeping
2781 * them if a repairing happens.
2783 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2785 /* If disconnection is not requested, then
2786 * clear the connection variable so that the
2787 * link is not terminated.
2789 if (!cp
->disconnect
)
2793 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2794 addr_type
= ADDR_LE_DEV_PUBLIC
;
2796 addr_type
= ADDR_LE_DEV_RANDOM
;
2798 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2800 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2804 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2805 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2809 /* If the connection variable is set, then termination of the
2810 * link is requested.
2813 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2815 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2819 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2826 cmd
->cmd_complete
= addr_cmd_complete
;
2828 dc
.handle
= cpu_to_le16(conn
->handle
);
2829 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2830 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2832 mgmt_pending_remove(cmd
);
2835 hci_dev_unlock(hdev
);
2839 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2842 struct mgmt_cp_disconnect
*cp
= data
;
2843 struct mgmt_rp_disconnect rp
;
2844 struct pending_cmd
*cmd
;
2845 struct hci_conn
*conn
;
2850 memset(&rp
, 0, sizeof(rp
));
2851 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2852 rp
.addr
.type
= cp
->addr
.type
;
2854 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2855 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2856 MGMT_STATUS_INVALID_PARAMS
,
2861 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2862 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2863 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2867 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2868 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2869 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2873 if (cp
->addr
.type
== BDADDR_BREDR
)
2874 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2877 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2879 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2880 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2881 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2885 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2891 cmd
->cmd_complete
= generic_cmd_complete
;
2893 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2895 mgmt_pending_remove(cmd
);
2898 hci_dev_unlock(hdev
);
2902 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2904 switch (link_type
) {
2906 switch (addr_type
) {
2907 case ADDR_LE_DEV_PUBLIC
:
2908 return BDADDR_LE_PUBLIC
;
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM
;
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR
;
2921 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2924 struct mgmt_rp_get_connections
*rp
;
2934 if (!hdev_is_powered(hdev
)) {
2935 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2936 MGMT_STATUS_NOT_POWERED
);
2941 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2946 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2947 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2954 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2957 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2958 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2959 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2964 rp
->conn_count
= cpu_to_le16(i
);
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2969 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2975 hci_dev_unlock(hdev
);
2979 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2980 struct mgmt_cp_pin_code_neg_reply
*cp
)
2982 struct pending_cmd
*cmd
;
2985 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2990 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2991 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2993 mgmt_pending_remove(cmd
);
2998 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3001 struct hci_conn
*conn
;
3002 struct mgmt_cp_pin_code_reply
*cp
= data
;
3003 struct hci_cp_pin_code_reply reply
;
3004 struct pending_cmd
*cmd
;
3011 if (!hdev_is_powered(hdev
)) {
3012 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3013 MGMT_STATUS_NOT_POWERED
);
3017 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
3019 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3020 MGMT_STATUS_NOT_CONNECTED
);
3024 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
3025 struct mgmt_cp_pin_code_neg_reply ncp
;
3027 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
3029 BT_ERR("PIN code is not 16 bytes long");
3031 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
3033 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3034 MGMT_STATUS_INVALID_PARAMS
);
3039 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
3045 cmd
->cmd_complete
= addr_cmd_complete
;
3047 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
3048 reply
.pin_len
= cp
->pin_len
;
3049 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
3051 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
3053 mgmt_pending_remove(cmd
);
3056 hci_dev_unlock(hdev
);
3060 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3063 struct mgmt_cp_set_io_capability
*cp
= data
;
3067 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
3068 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
3069 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
3073 hdev
->io_capability
= cp
->io_capability
;
3075 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
3076 hdev
->io_capability
);
3078 hci_dev_unlock(hdev
);
3080 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
3084 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
3086 struct hci_dev
*hdev
= conn
->hdev
;
3087 struct pending_cmd
*cmd
;
3089 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
3090 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
3093 if (cmd
->user_data
!= conn
)
3102 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
3104 struct mgmt_rp_pair_device rp
;
3105 struct hci_conn
*conn
= cmd
->user_data
;
3107 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
3108 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
3110 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
3113 /* So we don't get further callbacks for this connection */
3114 conn
->connect_cfm_cb
= NULL
;
3115 conn
->security_cfm_cb
= NULL
;
3116 conn
->disconn_cfm_cb
= NULL
;
3118 hci_conn_drop(conn
);
3120 /* The device is paired so there is no need to remove
3121 * its connection parameters anymore.
3123 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3128 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
3130 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
3131 struct pending_cmd
*cmd
;
3133 cmd
= find_pairing(conn
);
3135 cmd
->cmd_complete(cmd
, status
);
3136 mgmt_pending_remove(cmd
);
3140 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3142 struct pending_cmd
*cmd
;
3144 BT_DBG("status %u", status
);
3146 cmd
= find_pairing(conn
);
3148 BT_DBG("Unable to find a pending command");
3152 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3153 mgmt_pending_remove(cmd
);
3156 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3158 struct pending_cmd
*cmd
;
3160 BT_DBG("status %u", status
);
3165 cmd
= find_pairing(conn
);
3167 BT_DBG("Unable to find a pending command");
3171 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3172 mgmt_pending_remove(cmd
);
3175 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3178 struct mgmt_cp_pair_device
*cp
= data
;
3179 struct mgmt_rp_pair_device rp
;
3180 struct pending_cmd
*cmd
;
3181 u8 sec_level
, auth_type
;
3182 struct hci_conn
*conn
;
3187 memset(&rp
, 0, sizeof(rp
));
3188 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3189 rp
.addr
.type
= cp
->addr
.type
;
3191 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3192 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3193 MGMT_STATUS_INVALID_PARAMS
,
3196 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3197 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3198 MGMT_STATUS_INVALID_PARAMS
,
3203 if (!hdev_is_powered(hdev
)) {
3204 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3205 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
3209 sec_level
= BT_SECURITY_MEDIUM
;
3210 auth_type
= HCI_AT_DEDICATED_BONDING
;
3212 if (cp
->addr
.type
== BDADDR_BREDR
) {
3213 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3218 /* Convert from L2CAP channel address type to HCI address type
3220 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
3221 addr_type
= ADDR_LE_DEV_PUBLIC
;
3223 addr_type
= ADDR_LE_DEV_RANDOM
;
3225 /* When pairing a new device, it is expected to remember
3226 * this device for future connections. Adding the connection
3227 * parameter information ahead of time allows tracking
3228 * of the slave preferred values and will speed up any
3229 * further connection establishment.
3231 * If connection parameters already exist, then they
3232 * will be kept and this function does nothing.
3234 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3236 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
3237 sec_level
, HCI_LE_CONN_TIMEOUT
,
3244 if (PTR_ERR(conn
) == -EBUSY
)
3245 status
= MGMT_STATUS_BUSY
;
3247 status
= MGMT_STATUS_CONNECT_FAILED
;
3249 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3255 if (conn
->connect_cfm_cb
) {
3256 hci_conn_drop(conn
);
3257 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3258 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3262 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3265 hci_conn_drop(conn
);
3269 cmd
->cmd_complete
= pairing_complete
;
3271 /* For LE, just connecting isn't a proof that the pairing finished */
3272 if (cp
->addr
.type
== BDADDR_BREDR
) {
3273 conn
->connect_cfm_cb
= pairing_complete_cb
;
3274 conn
->security_cfm_cb
= pairing_complete_cb
;
3275 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3277 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3278 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3279 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3282 conn
->io_capability
= cp
->io_cap
;
3283 cmd
->user_data
= hci_conn_get(conn
);
3285 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3286 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3287 cmd
->cmd_complete(cmd
, 0);
3288 mgmt_pending_remove(cmd
);
3294 hci_dev_unlock(hdev
);
3298 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3301 struct mgmt_addr_info
*addr
= data
;
3302 struct pending_cmd
*cmd
;
3303 struct hci_conn
*conn
;
3310 if (!hdev_is_powered(hdev
)) {
3311 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3312 MGMT_STATUS_NOT_POWERED
);
3316 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3318 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3319 MGMT_STATUS_INVALID_PARAMS
);
3323 conn
= cmd
->user_data
;
3325 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3326 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3327 MGMT_STATUS_INVALID_PARAMS
);
3331 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3332 mgmt_pending_remove(cmd
);
3334 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3335 addr
, sizeof(*addr
));
3337 hci_dev_unlock(hdev
);
3341 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3342 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3343 u16 hci_op
, __le32 passkey
)
3345 struct pending_cmd
*cmd
;
3346 struct hci_conn
*conn
;
3351 if (!hdev_is_powered(hdev
)) {
3352 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3353 MGMT_STATUS_NOT_POWERED
, addr
,
3358 if (addr
->type
== BDADDR_BREDR
)
3359 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3361 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3364 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3365 MGMT_STATUS_NOT_CONNECTED
, addr
,
3370 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3371 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3373 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3374 MGMT_STATUS_SUCCESS
, addr
,
3377 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3378 MGMT_STATUS_FAILED
, addr
,
3384 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3390 cmd
->cmd_complete
= addr_cmd_complete
;
3392 /* Continue with pairing via HCI */
3393 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3394 struct hci_cp_user_passkey_reply cp
;
3396 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3397 cp
.passkey
= passkey
;
3398 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3400 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3404 mgmt_pending_remove(cmd
);
3407 hci_dev_unlock(hdev
);
3411 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3412 void *data
, u16 len
)
3414 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3418 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3419 MGMT_OP_PIN_CODE_NEG_REPLY
,
3420 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3423 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3426 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3430 if (len
!= sizeof(*cp
))
3431 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3432 MGMT_STATUS_INVALID_PARAMS
);
3434 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3435 MGMT_OP_USER_CONFIRM_REPLY
,
3436 HCI_OP_USER_CONFIRM_REPLY
, 0);
3439 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3440 void *data
, u16 len
)
3442 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3446 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3447 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3448 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3451 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3454 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3458 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3459 MGMT_OP_USER_PASSKEY_REPLY
,
3460 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3463 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3464 void *data
, u16 len
)
3466 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3470 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3471 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3472 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3475 static void update_name(struct hci_request
*req
)
3477 struct hci_dev
*hdev
= req
->hdev
;
3478 struct hci_cp_write_local_name cp
;
3480 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3482 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3485 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3487 struct mgmt_cp_set_local_name
*cp
;
3488 struct pending_cmd
*cmd
;
3490 BT_DBG("status 0x%02x", status
);
3494 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3501 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3502 mgmt_status(status
));
3504 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3507 mgmt_pending_remove(cmd
);
3510 hci_dev_unlock(hdev
);
3513 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3516 struct mgmt_cp_set_local_name
*cp
= data
;
3517 struct pending_cmd
*cmd
;
3518 struct hci_request req
;
3525 /* If the old values are the same as the new ones just return a
3526 * direct command complete event.
3528 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3529 !memcmp(hdev
->short_name
, cp
->short_name
,
3530 sizeof(hdev
->short_name
))) {
3531 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3536 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3538 if (!hdev_is_powered(hdev
)) {
3539 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3541 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3546 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3552 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3558 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3560 hci_req_init(&req
, hdev
);
3562 if (lmp_bredr_capable(hdev
)) {
3567 /* The name is stored in the scan response data and so
3568 * no need to udpate the advertising data here.
3570 if (lmp_le_capable(hdev
))
3571 update_scan_rsp_data(&req
);
3573 err
= hci_req_run(&req
, set_name_complete
);
3575 mgmt_pending_remove(cmd
);
3578 hci_dev_unlock(hdev
);
3582 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3583 void *data
, u16 data_len
)
3585 struct pending_cmd
*cmd
;
3588 BT_DBG("%s", hdev
->name
);
3592 if (!hdev_is_powered(hdev
)) {
3593 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3594 MGMT_STATUS_NOT_POWERED
);
3598 if (!lmp_ssp_capable(hdev
)) {
3599 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3600 MGMT_STATUS_NOT_SUPPORTED
);
3604 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3605 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3610 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3616 if (bredr_sc_enabled(hdev
))
3617 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3620 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3623 mgmt_pending_remove(cmd
);
3626 hci_dev_unlock(hdev
);
3630 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3631 void *data
, u16 len
)
3635 BT_DBG("%s ", hdev
->name
);
3639 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3640 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3643 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3644 err
= cmd_complete(sk
, hdev
->id
,
3645 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3646 MGMT_STATUS_INVALID_PARAMS
,
3647 &cp
->addr
, sizeof(cp
->addr
));
3651 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3652 cp
->addr
.type
, cp
->hash
,
3653 cp
->rand
, NULL
, NULL
);
3655 status
= MGMT_STATUS_FAILED
;
3657 status
= MGMT_STATUS_SUCCESS
;
3659 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3660 status
, &cp
->addr
, sizeof(cp
->addr
));
3661 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3662 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3663 u8
*rand192
, *hash192
;
3666 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3667 err
= cmd_complete(sk
, hdev
->id
,
3668 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3669 MGMT_STATUS_INVALID_PARAMS
,
3670 &cp
->addr
, sizeof(cp
->addr
));
3674 if (bdaddr_type_is_le(cp
->addr
.type
)) {
3678 rand192
= cp
->rand192
;
3679 hash192
= cp
->hash192
;
3682 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3683 cp
->addr
.type
, hash192
, rand192
,
3684 cp
->hash256
, cp
->rand256
);
3686 status
= MGMT_STATUS_FAILED
;
3688 status
= MGMT_STATUS_SUCCESS
;
3690 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3691 status
, &cp
->addr
, sizeof(cp
->addr
));
3693 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3694 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3695 MGMT_STATUS_INVALID_PARAMS
);
3699 hci_dev_unlock(hdev
);
3703 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3704 void *data
, u16 len
)
3706 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3710 BT_DBG("%s", hdev
->name
);
3712 if (cp
->addr
.type
!= BDADDR_BREDR
)
3713 return cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3714 MGMT_STATUS_INVALID_PARAMS
,
3715 &cp
->addr
, sizeof(cp
->addr
));
3719 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
3720 hci_remote_oob_data_clear(hdev
);
3721 status
= MGMT_STATUS_SUCCESS
;
3725 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3727 status
= MGMT_STATUS_INVALID_PARAMS
;
3729 status
= MGMT_STATUS_SUCCESS
;
3732 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3733 status
, &cp
->addr
, sizeof(cp
->addr
));
3735 hci_dev_unlock(hdev
);
3739 static bool trigger_discovery(struct hci_request
*req
, u8
*status
)
3741 struct hci_dev
*hdev
= req
->hdev
;
3742 struct hci_cp_le_set_scan_param param_cp
;
3743 struct hci_cp_le_set_scan_enable enable_cp
;
3744 struct hci_cp_inquiry inq_cp
;
3745 /* General inquiry access code (GIAC) */
3746 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3750 switch (hdev
->discovery
.type
) {
3751 case DISCOV_TYPE_BREDR
:
3752 *status
= mgmt_bredr_support(hdev
);
3756 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3757 *status
= MGMT_STATUS_BUSY
;
3761 hci_inquiry_cache_flush(hdev
);
3763 memset(&inq_cp
, 0, sizeof(inq_cp
));
3764 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3765 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3766 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3769 case DISCOV_TYPE_LE
:
3770 case DISCOV_TYPE_INTERLEAVED
:
3771 *status
= mgmt_le_support(hdev
);
3775 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3776 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3777 *status
= MGMT_STATUS_NOT_SUPPORTED
;
3781 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
)) {
3782 /* Don't let discovery abort an outgoing
3783 * connection attempt that's using directed
3786 if (hci_conn_hash_lookup_state(hdev
, LE_LINK
,
3788 *status
= MGMT_STATUS_REJECTED
;
3792 disable_advertising(req
);
3795 /* If controller is scanning, it means the background scanning
3796 * is running. Thus, we should temporarily stop it in order to
3797 * set the discovery scanning parameters.
3799 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
3800 hci_req_add_le_scan_disable(req
);
3802 memset(¶m_cp
, 0, sizeof(param_cp
));
3804 /* All active scans will be done with either a resolvable
3805 * private address (when privacy feature has been enabled)
3806 * or non-resolvable private address.
3808 err
= hci_update_random_address(req
, true, &own_addr_type
);
3810 *status
= MGMT_STATUS_FAILED
;
3814 param_cp
.type
= LE_SCAN_ACTIVE
;
3815 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3816 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3817 param_cp
.own_address_type
= own_addr_type
;
3818 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3821 memset(&enable_cp
, 0, sizeof(enable_cp
));
3822 enable_cp
.enable
= LE_SCAN_ENABLE
;
3823 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3824 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3829 *status
= MGMT_STATUS_INVALID_PARAMS
;
3836 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3838 struct pending_cmd
*cmd
;
3839 unsigned long timeout
;
3841 BT_DBG("status %d", status
);
3845 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3847 cmd
= mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
3850 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3851 mgmt_pending_remove(cmd
);
3855 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3859 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3861 switch (hdev
->discovery
.type
) {
3862 case DISCOV_TYPE_LE
:
3863 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
3865 case DISCOV_TYPE_INTERLEAVED
:
3866 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
3868 case DISCOV_TYPE_BREDR
:
3872 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3878 queue_delayed_work(hdev
->workqueue
,
3879 &hdev
->le_scan_disable
, timeout
);
3882 hci_dev_unlock(hdev
);
3885 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3886 void *data
, u16 len
)
3888 struct mgmt_cp_start_discovery
*cp
= data
;
3889 struct pending_cmd
*cmd
;
3890 struct hci_request req
;
3894 BT_DBG("%s", hdev
->name
);
3898 if (!hdev_is_powered(hdev
)) {
3899 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3900 MGMT_STATUS_NOT_POWERED
,
3901 &cp
->type
, sizeof(cp
->type
));
3905 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3906 test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3907 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3908 MGMT_STATUS_BUSY
, &cp
->type
,
3913 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
3919 cmd
->cmd_complete
= generic_cmd_complete
;
3921 /* Clear the discovery filter first to free any previously
3922 * allocated memory for the UUID list.
3924 hci_discovery_filter_clear(hdev
);
3926 hdev
->discovery
.type
= cp
->type
;
3927 hdev
->discovery
.report_invalid_rssi
= false;
3929 hci_req_init(&req
, hdev
);
3931 if (!trigger_discovery(&req
, &status
)) {
3932 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3933 status
, &cp
->type
, sizeof(cp
->type
));
3934 mgmt_pending_remove(cmd
);
3938 err
= hci_req_run(&req
, start_discovery_complete
);
3940 mgmt_pending_remove(cmd
);
3944 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3947 hci_dev_unlock(hdev
);
3951 static void service_discovery_cmd_complete(struct pending_cmd
*cmd
, u8 status
)
3953 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, cmd
->param
, 1);
3956 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3957 void *data
, u16 len
)
3959 struct mgmt_cp_start_service_discovery
*cp
= data
;
3960 struct pending_cmd
*cmd
;
3961 struct hci_request req
;
3962 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
3963 u16 uuid_count
, expected_len
;
3967 BT_DBG("%s", hdev
->name
);
3971 if (!hdev_is_powered(hdev
)) {
3972 err
= cmd_complete(sk
, hdev
->id
,
3973 MGMT_OP_START_SERVICE_DISCOVERY
,
3974 MGMT_STATUS_NOT_POWERED
,
3975 &cp
->type
, sizeof(cp
->type
));
3979 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3980 test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3981 err
= cmd_complete(sk
, hdev
->id
,
3982 MGMT_OP_START_SERVICE_DISCOVERY
,
3983 MGMT_STATUS_BUSY
, &cp
->type
,
3988 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
3989 if (uuid_count
> max_uuid_count
) {
3990 BT_ERR("service_discovery: too big uuid_count value %u",
3992 err
= cmd_complete(sk
, hdev
->id
,
3993 MGMT_OP_START_SERVICE_DISCOVERY
,
3994 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3999 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4000 if (expected_len
!= len
) {
4001 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4003 err
= cmd_complete(sk
, hdev
->id
,
4004 MGMT_OP_START_SERVICE_DISCOVERY
,
4005 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4010 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4017 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4019 /* Clear the discovery filter first to free any previously
4020 * allocated memory for the UUID list.
4022 hci_discovery_filter_clear(hdev
);
4024 hdev
->discovery
.type
= cp
->type
;
4025 hdev
->discovery
.rssi
= cp
->rssi
;
4026 hdev
->discovery
.uuid_count
= uuid_count
;
4028 if (uuid_count
> 0) {
4029 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4031 if (!hdev
->discovery
.uuids
) {
4032 err
= cmd_complete(sk
, hdev
->id
,
4033 MGMT_OP_START_SERVICE_DISCOVERY
,
4035 &cp
->type
, sizeof(cp
->type
));
4036 mgmt_pending_remove(cmd
);
4041 hci_req_init(&req
, hdev
);
4043 if (!trigger_discovery(&req
, &status
)) {
4044 err
= cmd_complete(sk
, hdev
->id
,
4045 MGMT_OP_START_SERVICE_DISCOVERY
,
4046 status
, &cp
->type
, sizeof(cp
->type
));
4047 mgmt_pending_remove(cmd
);
4051 err
= hci_req_run(&req
, start_discovery_complete
);
4053 mgmt_pending_remove(cmd
);
4057 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4060 hci_dev_unlock(hdev
);
4064 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
4066 struct pending_cmd
*cmd
;
4068 BT_DBG("status %d", status
);
4072 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
4074 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4075 mgmt_pending_remove(cmd
);
4079 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4081 hci_dev_unlock(hdev
);
4084 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4087 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4088 struct pending_cmd
*cmd
;
4089 struct hci_request req
;
4092 BT_DBG("%s", hdev
->name
);
4096 if (!hci_discovery_active(hdev
)) {
4097 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4098 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4099 sizeof(mgmt_cp
->type
));
4103 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4104 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4105 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
4106 sizeof(mgmt_cp
->type
));
4110 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4116 cmd
->cmd_complete
= generic_cmd_complete
;
4118 hci_req_init(&req
, hdev
);
4120 hci_stop_discovery(&req
);
4122 err
= hci_req_run(&req
, stop_discovery_complete
);
4124 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4128 mgmt_pending_remove(cmd
);
4130 /* If no HCI commands were sent we're done */
4131 if (err
== -ENODATA
) {
4132 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
4133 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4134 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4138 hci_dev_unlock(hdev
);
4142 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4145 struct mgmt_cp_confirm_name
*cp
= data
;
4146 struct inquiry_entry
*e
;
4149 BT_DBG("%s", hdev
->name
);
4153 if (!hci_discovery_active(hdev
)) {
4154 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4155 MGMT_STATUS_FAILED
, &cp
->addr
,
4160 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4162 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4163 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4168 if (cp
->name_known
) {
4169 e
->name_state
= NAME_KNOWN
;
4172 e
->name_state
= NAME_NEEDED
;
4173 hci_inquiry_cache_update_resolve(hdev
, e
);
4176 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
4180 hci_dev_unlock(hdev
);
4184 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4187 struct mgmt_cp_block_device
*cp
= data
;
4191 BT_DBG("%s", hdev
->name
);
4193 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4194 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4195 MGMT_STATUS_INVALID_PARAMS
,
4196 &cp
->addr
, sizeof(cp
->addr
));
4200 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4203 status
= MGMT_STATUS_FAILED
;
4207 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4209 status
= MGMT_STATUS_SUCCESS
;
4212 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4213 &cp
->addr
, sizeof(cp
->addr
));
4215 hci_dev_unlock(hdev
);
4220 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4223 struct mgmt_cp_unblock_device
*cp
= data
;
4227 BT_DBG("%s", hdev
->name
);
4229 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4230 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
4231 MGMT_STATUS_INVALID_PARAMS
,
4232 &cp
->addr
, sizeof(cp
->addr
));
4236 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4239 status
= MGMT_STATUS_INVALID_PARAMS
;
4243 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4245 status
= MGMT_STATUS_SUCCESS
;
4248 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
4249 &cp
->addr
, sizeof(cp
->addr
));
4251 hci_dev_unlock(hdev
);
4256 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4259 struct mgmt_cp_set_device_id
*cp
= data
;
4260 struct hci_request req
;
4264 BT_DBG("%s", hdev
->name
);
4266 source
= __le16_to_cpu(cp
->source
);
4268 if (source
> 0x0002)
4269 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
4270 MGMT_STATUS_INVALID_PARAMS
);
4274 hdev
->devid_source
= source
;
4275 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
4276 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
4277 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
4279 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
4281 hci_req_init(&req
, hdev
);
4283 hci_req_run(&req
, NULL
);
4285 hci_dev_unlock(hdev
);
4290 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
4292 struct cmd_lookup match
= { NULL
, hdev
};
4297 u8 mgmt_err
= mgmt_status(status
);
4299 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4300 cmd_status_rsp
, &mgmt_err
);
4304 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
4305 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4307 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4309 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4312 new_settings(hdev
, match
.sk
);
4318 hci_dev_unlock(hdev
);
4321 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4324 struct mgmt_mode
*cp
= data
;
4325 struct pending_cmd
*cmd
;
4326 struct hci_request req
;
4327 u8 val
, enabled
, status
;
4330 BT_DBG("request for %s", hdev
->name
);
4332 status
= mgmt_le_support(hdev
);
4334 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4337 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4338 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4339 MGMT_STATUS_INVALID_PARAMS
);
4344 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4346 /* The following conditions are ones which mean that we should
4347 * not do any HCI communication but directly send a mgmt
4348 * response to user space (after toggling the flag if
4351 if (!hdev_is_powered(hdev
) || val
== enabled
||
4352 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4353 (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4354 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4355 bool changed
= false;
4357 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
4358 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4362 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4367 err
= new_settings(hdev
, sk
);
4372 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4373 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
4374 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4379 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4385 hci_req_init(&req
, hdev
);
4388 enable_advertising(&req
);
4390 disable_advertising(&req
);
4392 err
= hci_req_run(&req
, set_advertising_complete
);
4394 mgmt_pending_remove(cmd
);
4397 hci_dev_unlock(hdev
);
4401 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
4402 void *data
, u16 len
)
4404 struct mgmt_cp_set_static_address
*cp
= data
;
4407 BT_DBG("%s", hdev
->name
);
4409 if (!lmp_le_capable(hdev
))
4410 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4411 MGMT_STATUS_NOT_SUPPORTED
);
4413 if (hdev_is_powered(hdev
))
4414 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4415 MGMT_STATUS_REJECTED
);
4417 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
4418 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
4419 return cmd_status(sk
, hdev
->id
,
4420 MGMT_OP_SET_STATIC_ADDRESS
,
4421 MGMT_STATUS_INVALID_PARAMS
);
4423 /* Two most significant bits shall be set */
4424 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4425 return cmd_status(sk
, hdev
->id
,
4426 MGMT_OP_SET_STATIC_ADDRESS
,
4427 MGMT_STATUS_INVALID_PARAMS
);
4432 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4434 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
4436 hci_dev_unlock(hdev
);
4441 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4442 void *data
, u16 len
)
4444 struct mgmt_cp_set_scan_params
*cp
= data
;
4445 __u16 interval
, window
;
4448 BT_DBG("%s", hdev
->name
);
4450 if (!lmp_le_capable(hdev
))
4451 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4452 MGMT_STATUS_NOT_SUPPORTED
);
4454 interval
= __le16_to_cpu(cp
->interval
);
4456 if (interval
< 0x0004 || interval
> 0x4000)
4457 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4458 MGMT_STATUS_INVALID_PARAMS
);
4460 window
= __le16_to_cpu(cp
->window
);
4462 if (window
< 0x0004 || window
> 0x4000)
4463 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4464 MGMT_STATUS_INVALID_PARAMS
);
4466 if (window
> interval
)
4467 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4468 MGMT_STATUS_INVALID_PARAMS
);
4472 hdev
->le_scan_interval
= interval
;
4473 hdev
->le_scan_window
= window
;
4475 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
4477 /* If background scan is running, restart it so new parameters are
4480 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4481 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4482 struct hci_request req
;
4484 hci_req_init(&req
, hdev
);
4486 hci_req_add_le_scan_disable(&req
);
4487 hci_req_add_le_passive_scan(&req
);
4489 hci_req_run(&req
, NULL
);
4492 hci_dev_unlock(hdev
);
4497 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
4499 struct pending_cmd
*cmd
;
4501 BT_DBG("status 0x%02x", status
);
4505 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4510 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4511 mgmt_status(status
));
4513 struct mgmt_mode
*cp
= cmd
->param
;
4516 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4518 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4520 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4521 new_settings(hdev
, cmd
->sk
);
4524 mgmt_pending_remove(cmd
);
4527 hci_dev_unlock(hdev
);
4530 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4531 void *data
, u16 len
)
4533 struct mgmt_mode
*cp
= data
;
4534 struct pending_cmd
*cmd
;
4535 struct hci_request req
;
4538 BT_DBG("%s", hdev
->name
);
4540 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
4541 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4542 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4543 MGMT_STATUS_NOT_SUPPORTED
);
4545 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4546 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4547 MGMT_STATUS_INVALID_PARAMS
);
4549 if (!hdev_is_powered(hdev
))
4550 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4551 MGMT_STATUS_NOT_POWERED
);
4553 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4554 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4555 MGMT_STATUS_REJECTED
);
4559 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4560 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4565 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
4566 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4571 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4578 hci_req_init(&req
, hdev
);
4580 write_fast_connectable(&req
, cp
->val
);
4582 err
= hci_req_run(&req
, fast_connectable_complete
);
4584 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4585 MGMT_STATUS_FAILED
);
4586 mgmt_pending_remove(cmd
);
4590 hci_dev_unlock(hdev
);
4595 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4597 struct pending_cmd
*cmd
;
4599 BT_DBG("status 0x%02x", status
);
4603 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4608 u8 mgmt_err
= mgmt_status(status
);
4610 /* We need to restore the flag if related HCI commands
4613 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4615 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4617 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4618 new_settings(hdev
, cmd
->sk
);
4621 mgmt_pending_remove(cmd
);
4624 hci_dev_unlock(hdev
);
4627 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4629 struct mgmt_mode
*cp
= data
;
4630 struct pending_cmd
*cmd
;
4631 struct hci_request req
;
4634 BT_DBG("request for %s", hdev
->name
);
4636 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4637 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4638 MGMT_STATUS_NOT_SUPPORTED
);
4640 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4641 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4642 MGMT_STATUS_REJECTED
);
4644 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4645 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4646 MGMT_STATUS_INVALID_PARAMS
);
4650 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4651 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4655 if (!hdev_is_powered(hdev
)) {
4657 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4658 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4659 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4660 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4661 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4664 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4666 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4670 err
= new_settings(hdev
, sk
);
4674 /* Reject disabling when powered on */
4676 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4677 MGMT_STATUS_REJECTED
);
4681 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4682 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4687 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4693 /* We need to flip the bit already here so that update_adv_data
4694 * generates the correct flags.
4696 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4698 hci_req_init(&req
, hdev
);
4700 write_fast_connectable(&req
, false);
4701 __hci_update_page_scan(&req
);
4703 /* Since only the advertising data flags will change, there
4704 * is no need to update the scan response data.
4706 update_adv_data(&req
);
4708 err
= hci_req_run(&req
, set_bredr_complete
);
4710 mgmt_pending_remove(cmd
);
4713 hci_dev_unlock(hdev
);
4717 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4718 void *data
, u16 len
)
4720 struct mgmt_mode
*cp
= data
;
4721 struct pending_cmd
*cmd
;
4725 BT_DBG("request for %s", hdev
->name
);
4727 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
4728 !lmp_sc_capable(hdev
) && !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
4729 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4730 MGMT_STATUS_NOT_SUPPORTED
);
4732 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4733 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4734 MGMT_STATUS_INVALID_PARAMS
);
4738 if (!hdev_is_powered(hdev
) ||
4739 (!lmp_sc_capable(hdev
) &&
4740 !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
)) ||
4741 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4745 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4747 if (cp
->val
== 0x02)
4748 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4750 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4752 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4754 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4757 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4762 err
= new_settings(hdev
, sk
);
4767 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4768 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4775 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4776 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4777 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4781 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4787 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4789 mgmt_pending_remove(cmd
);
4793 if (cp
->val
== 0x02)
4794 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4796 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4799 hci_dev_unlock(hdev
);
4803 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4804 void *data
, u16 len
)
4806 struct mgmt_mode
*cp
= data
;
4807 bool changed
, use_changed
;
4810 BT_DBG("request for %s", hdev
->name
);
4812 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4813 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4814 MGMT_STATUS_INVALID_PARAMS
);
4819 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
4822 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
4825 if (cp
->val
== 0x02)
4826 use_changed
= !test_and_set_bit(HCI_USE_DEBUG_KEYS
,
4829 use_changed
= test_and_clear_bit(HCI_USE_DEBUG_KEYS
,
4832 if (hdev_is_powered(hdev
) && use_changed
&&
4833 test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
4834 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
4835 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
4836 sizeof(mode
), &mode
);
4839 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4844 err
= new_settings(hdev
, sk
);
4847 hci_dev_unlock(hdev
);
4851 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4854 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4858 BT_DBG("request for %s", hdev
->name
);
4860 if (!lmp_le_capable(hdev
))
4861 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4862 MGMT_STATUS_NOT_SUPPORTED
);
4864 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4865 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4866 MGMT_STATUS_INVALID_PARAMS
);
4868 if (hdev_is_powered(hdev
))
4869 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4870 MGMT_STATUS_REJECTED
);
4874 /* If user space supports this command it is also expected to
4875 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4877 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4880 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4881 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4882 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4884 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4885 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4886 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4889 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4894 err
= new_settings(hdev
, sk
);
4897 hci_dev_unlock(hdev
);
4901 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4903 switch (irk
->addr
.type
) {
4904 case BDADDR_LE_PUBLIC
:
4907 case BDADDR_LE_RANDOM
:
4908 /* Two most significant bits shall be set */
4909 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4917 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4920 struct mgmt_cp_load_irks
*cp
= cp_data
;
4921 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
4922 sizeof(struct mgmt_irk_info
));
4923 u16 irk_count
, expected_len
;
4926 BT_DBG("request for %s", hdev
->name
);
4928 if (!lmp_le_capable(hdev
))
4929 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4930 MGMT_STATUS_NOT_SUPPORTED
);
4932 irk_count
= __le16_to_cpu(cp
->irk_count
);
4933 if (irk_count
> max_irk_count
) {
4934 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
4935 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4936 MGMT_STATUS_INVALID_PARAMS
);
4939 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4940 if (expected_len
!= len
) {
4941 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4943 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4944 MGMT_STATUS_INVALID_PARAMS
);
4947 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4949 for (i
= 0; i
< irk_count
; i
++) {
4950 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4952 if (!irk_is_valid(key
))
4953 return cmd_status(sk
, hdev
->id
,
4955 MGMT_STATUS_INVALID_PARAMS
);
4960 hci_smp_irks_clear(hdev
);
4962 for (i
= 0; i
< irk_count
; i
++) {
4963 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4966 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4967 addr_type
= ADDR_LE_DEV_PUBLIC
;
4969 addr_type
= ADDR_LE_DEV_RANDOM
;
4971 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4975 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4977 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4979 hci_dev_unlock(hdev
);
4984 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4986 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4989 switch (key
->addr
.type
) {
4990 case BDADDR_LE_PUBLIC
:
4993 case BDADDR_LE_RANDOM
:
4994 /* Two most significant bits shall be set */
4995 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5003 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5004 void *cp_data
, u16 len
)
5006 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5007 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5008 sizeof(struct mgmt_ltk_info
));
5009 u16 key_count
, expected_len
;
5012 BT_DBG("request for %s", hdev
->name
);
5014 if (!lmp_le_capable(hdev
))
5015 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5016 MGMT_STATUS_NOT_SUPPORTED
);
5018 key_count
= __le16_to_cpu(cp
->key_count
);
5019 if (key_count
> max_key_count
) {
5020 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5021 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5022 MGMT_STATUS_INVALID_PARAMS
);
5025 expected_len
= sizeof(*cp
) + key_count
*
5026 sizeof(struct mgmt_ltk_info
);
5027 if (expected_len
!= len
) {
5028 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5030 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5031 MGMT_STATUS_INVALID_PARAMS
);
5034 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5036 for (i
= 0; i
< key_count
; i
++) {
5037 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5039 if (!ltk_is_valid(key
))
5040 return cmd_status(sk
, hdev
->id
,
5041 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5042 MGMT_STATUS_INVALID_PARAMS
);
5047 hci_smp_ltks_clear(hdev
);
5049 for (i
= 0; i
< key_count
; i
++) {
5050 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5051 u8 type
, addr_type
, authenticated
;
5053 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
5054 addr_type
= ADDR_LE_DEV_PUBLIC
;
5056 addr_type
= ADDR_LE_DEV_RANDOM
;
5058 switch (key
->type
) {
5059 case MGMT_LTK_UNAUTHENTICATED
:
5060 authenticated
= 0x00;
5061 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5063 case MGMT_LTK_AUTHENTICATED
:
5064 authenticated
= 0x01;
5065 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5067 case MGMT_LTK_P256_UNAUTH
:
5068 authenticated
= 0x00;
5069 type
= SMP_LTK_P256
;
5071 case MGMT_LTK_P256_AUTH
:
5072 authenticated
= 0x01;
5073 type
= SMP_LTK_P256
;
5075 case MGMT_LTK_P256_DEBUG
:
5076 authenticated
= 0x00;
5077 type
= SMP_LTK_P256_DEBUG
;
5082 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
5083 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
5087 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5090 hci_dev_unlock(hdev
);
5095 static void conn_info_cmd_complete(struct pending_cmd
*cmd
, u8 status
)
5097 struct hci_conn
*conn
= cmd
->user_data
;
5098 struct mgmt_rp_get_conn_info rp
;
5100 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
5102 if (status
== MGMT_STATUS_SUCCESS
) {
5103 rp
.rssi
= conn
->rssi
;
5104 rp
.tx_power
= conn
->tx_power
;
5105 rp
.max_tx_power
= conn
->max_tx_power
;
5107 rp
.rssi
= HCI_RSSI_INVALID
;
5108 rp
.tx_power
= HCI_TX_POWER_INVALID
;
5109 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
5112 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
, status
,
5115 hci_conn_drop(conn
);
5119 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
)
5121 struct hci_cp_read_rssi
*cp
;
5122 struct pending_cmd
*cmd
;
5123 struct hci_conn
*conn
;
5127 BT_DBG("status 0x%02x", hci_status
);
5131 /* Commands sent in request are either Read RSSI or Read Transmit Power
5132 * Level so we check which one was last sent to retrieve connection
5133 * handle. Both commands have handle as first parameter so it's safe to
5134 * cast data on the same command struct.
5136 * First command sent is always Read RSSI and we fail only if it fails.
5137 * In other case we simply override error to indicate success as we
5138 * already remembered if TX power value is actually valid.
5140 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5142 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5143 status
= MGMT_STATUS_SUCCESS
;
5145 status
= mgmt_status(hci_status
);
5149 BT_ERR("invalid sent_cmd in conn_info response");
5153 handle
= __le16_to_cpu(cp
->handle
);
5154 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5156 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5160 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5164 cmd
->cmd_complete(cmd
, status
);
5165 mgmt_pending_remove(cmd
);
5168 hci_dev_unlock(hdev
);
5171 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5174 struct mgmt_cp_get_conn_info
*cp
= data
;
5175 struct mgmt_rp_get_conn_info rp
;
5176 struct hci_conn
*conn
;
5177 unsigned long conn_info_age
;
5180 BT_DBG("%s", hdev
->name
);
5182 memset(&rp
, 0, sizeof(rp
));
5183 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5184 rp
.addr
.type
= cp
->addr
.type
;
5186 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5187 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5188 MGMT_STATUS_INVALID_PARAMS
,
5193 if (!hdev_is_powered(hdev
)) {
5194 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5195 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
5199 if (cp
->addr
.type
== BDADDR_BREDR
)
5200 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5203 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5205 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5206 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5207 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
5211 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
5212 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5213 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
5217 /* To avoid client trying to guess when to poll again for information we
5218 * calculate conn info age as random value between min/max set in hdev.
5220 conn_info_age
= hdev
->conn_info_min_age
+
5221 prandom_u32_max(hdev
->conn_info_max_age
-
5222 hdev
->conn_info_min_age
);
5224 /* Query controller to refresh cached values if they are too old or were
5227 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5228 msecs_to_jiffies(conn_info_age
)) ||
5229 !conn
->conn_info_timestamp
) {
5230 struct hci_request req
;
5231 struct hci_cp_read_tx_power req_txp_cp
;
5232 struct hci_cp_read_rssi req_rssi_cp
;
5233 struct pending_cmd
*cmd
;
5235 hci_req_init(&req
, hdev
);
5236 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5237 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5240 /* For LE links TX power does not change thus we don't need to
5241 * query for it once value is known.
5243 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5244 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5245 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5246 req_txp_cp
.type
= 0x00;
5247 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5248 sizeof(req_txp_cp
), &req_txp_cp
);
5251 /* Max TX power needs to be read only once per connection */
5252 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5253 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5254 req_txp_cp
.type
= 0x01;
5255 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5256 sizeof(req_txp_cp
), &req_txp_cp
);
5259 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5263 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5270 hci_conn_hold(conn
);
5271 cmd
->user_data
= hci_conn_get(conn
);
5272 cmd
->cmd_complete
= conn_info_cmd_complete
;
5274 conn
->conn_info_timestamp
= jiffies
;
5276 /* Cache is valid, just reply with values cached in hci_conn */
5277 rp
.rssi
= conn
->rssi
;
5278 rp
.tx_power
= conn
->tx_power
;
5279 rp
.max_tx_power
= conn
->max_tx_power
;
5281 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5282 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5286 hci_dev_unlock(hdev
);
5290 static void clock_info_cmd_complete(struct pending_cmd
*cmd
, u8 status
)
5292 struct hci_conn
*conn
= cmd
->user_data
;
5293 struct mgmt_rp_get_clock_info rp
;
5294 struct hci_dev
*hdev
;
5296 memset(&rp
, 0, sizeof(rp
));
5297 memcpy(&rp
.addr
, &cmd
->param
, sizeof(rp
.addr
));
5302 hdev
= hci_dev_get(cmd
->index
);
5304 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5309 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5310 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5314 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
, sizeof(rp
));
5317 hci_conn_drop(conn
);
5322 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
)
5324 struct hci_cp_read_clock
*hci_cp
;
5325 struct pending_cmd
*cmd
;
5326 struct hci_conn
*conn
;
5328 BT_DBG("%s status %u", hdev
->name
, status
);
5332 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5336 if (hci_cp
->which
) {
5337 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5338 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5343 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5347 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5348 mgmt_pending_remove(cmd
);
5351 hci_dev_unlock(hdev
);
5354 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5357 struct mgmt_cp_get_clock_info
*cp
= data
;
5358 struct mgmt_rp_get_clock_info rp
;
5359 struct hci_cp_read_clock hci_cp
;
5360 struct pending_cmd
*cmd
;
5361 struct hci_request req
;
5362 struct hci_conn
*conn
;
5365 BT_DBG("%s", hdev
->name
);
5367 memset(&rp
, 0, sizeof(rp
));
5368 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5369 rp
.addr
.type
= cp
->addr
.type
;
5371 if (cp
->addr
.type
!= BDADDR_BREDR
)
5372 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5373 MGMT_STATUS_INVALID_PARAMS
,
5378 if (!hdev_is_powered(hdev
)) {
5379 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5380 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
5384 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5385 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5387 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5388 err
= cmd_complete(sk
, hdev
->id
,
5389 MGMT_OP_GET_CLOCK_INFO
,
5390 MGMT_STATUS_NOT_CONNECTED
,
5398 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
5404 cmd
->cmd_complete
= clock_info_cmd_complete
;
5406 hci_req_init(&req
, hdev
);
5408 memset(&hci_cp
, 0, sizeof(hci_cp
));
5409 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5412 hci_conn_hold(conn
);
5413 cmd
->user_data
= hci_conn_get(conn
);
5415 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
5416 hci_cp
.which
= 0x01; /* Piconet clock */
5417 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5420 err
= hci_req_run(&req
, get_clock_info_complete
);
5422 mgmt_pending_remove(cmd
);
5425 hci_dev_unlock(hdev
);
5429 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
5430 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
5432 struct mgmt_ev_device_added ev
;
5434 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5435 ev
.addr
.type
= type
;
5438 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5441 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5442 void *data
, u16 len
)
5444 struct mgmt_cp_add_device
*cp
= data
;
5445 u8 auto_conn
, addr_type
;
5448 BT_DBG("%s", hdev
->name
);
5450 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
5451 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5452 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5453 MGMT_STATUS_INVALID_PARAMS
,
5454 &cp
->addr
, sizeof(cp
->addr
));
5456 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
5457 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5458 MGMT_STATUS_INVALID_PARAMS
,
5459 &cp
->addr
, sizeof(cp
->addr
));
5463 if (cp
->addr
.type
== BDADDR_BREDR
) {
5464 /* Only incoming connections action is supported for now */
5465 if (cp
->action
!= 0x01) {
5466 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5467 MGMT_STATUS_INVALID_PARAMS
,
5468 &cp
->addr
, sizeof(cp
->addr
));
5472 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
5477 hci_update_page_scan(hdev
);
5482 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5483 addr_type
= ADDR_LE_DEV_PUBLIC
;
5485 addr_type
= ADDR_LE_DEV_RANDOM
;
5487 if (cp
->action
== 0x02)
5488 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5489 else if (cp
->action
== 0x01)
5490 auto_conn
= HCI_AUTO_CONN_DIRECT
;
5492 auto_conn
= HCI_AUTO_CONN_REPORT
;
5494 /* If the connection parameters don't exist for this device,
5495 * they will be created and configured with defaults.
5497 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5499 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5501 &cp
->addr
, sizeof(cp
->addr
));
5506 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5508 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5509 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5512 hci_dev_unlock(hdev
);
5516 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5517 bdaddr_t
*bdaddr
, u8 type
)
5519 struct mgmt_ev_device_removed ev
;
5521 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5522 ev
.addr
.type
= type
;
5524 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5527 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5528 void *data
, u16 len
)
5530 struct mgmt_cp_remove_device
*cp
= data
;
5533 BT_DBG("%s", hdev
->name
);
5537 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5538 struct hci_conn_params
*params
;
5541 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
5542 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5543 MGMT_STATUS_INVALID_PARAMS
,
5544 &cp
->addr
, sizeof(cp
->addr
));
5548 if (cp
->addr
.type
== BDADDR_BREDR
) {
5549 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
5553 err
= cmd_complete(sk
, hdev
->id
,
5554 MGMT_OP_REMOVE_DEVICE
,
5555 MGMT_STATUS_INVALID_PARAMS
,
5556 &cp
->addr
, sizeof(cp
->addr
));
5560 hci_update_page_scan(hdev
);
5562 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
5567 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5568 addr_type
= ADDR_LE_DEV_PUBLIC
;
5570 addr_type
= ADDR_LE_DEV_RANDOM
;
5572 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5575 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5576 MGMT_STATUS_INVALID_PARAMS
,
5577 &cp
->addr
, sizeof(cp
->addr
));
5581 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
) {
5582 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5583 MGMT_STATUS_INVALID_PARAMS
,
5584 &cp
->addr
, sizeof(cp
->addr
));
5588 list_del(¶ms
->action
);
5589 list_del(¶ms
->list
);
5591 hci_update_background_scan(hdev
);
5593 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5595 struct hci_conn_params
*p
, *tmp
;
5596 struct bdaddr_list
*b
, *btmp
;
5598 if (cp
->addr
.type
) {
5599 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5600 MGMT_STATUS_INVALID_PARAMS
,
5601 &cp
->addr
, sizeof(cp
->addr
));
5605 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
5606 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
5611 hci_update_page_scan(hdev
);
5613 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
5614 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
5616 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
5617 list_del(&p
->action
);
5622 BT_DBG("All LE connection parameters were removed");
5624 hci_update_background_scan(hdev
);
5628 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5629 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5632 hci_dev_unlock(hdev
);
5636 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5639 struct mgmt_cp_load_conn_param
*cp
= data
;
5640 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
5641 sizeof(struct mgmt_conn_param
));
5642 u16 param_count
, expected_len
;
5645 if (!lmp_le_capable(hdev
))
5646 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5647 MGMT_STATUS_NOT_SUPPORTED
);
5649 param_count
= __le16_to_cpu(cp
->param_count
);
5650 if (param_count
> max_param_count
) {
5651 BT_ERR("load_conn_param: too big param_count value %u",
5653 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5654 MGMT_STATUS_INVALID_PARAMS
);
5657 expected_len
= sizeof(*cp
) + param_count
*
5658 sizeof(struct mgmt_conn_param
);
5659 if (expected_len
!= len
) {
5660 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5662 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5663 MGMT_STATUS_INVALID_PARAMS
);
5666 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
5670 hci_conn_params_clear_disabled(hdev
);
5672 for (i
= 0; i
< param_count
; i
++) {
5673 struct mgmt_conn_param
*param
= &cp
->params
[i
];
5674 struct hci_conn_params
*hci_param
;
5675 u16 min
, max
, latency
, timeout
;
5678 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
5681 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
5682 addr_type
= ADDR_LE_DEV_PUBLIC
;
5683 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
5684 addr_type
= ADDR_LE_DEV_RANDOM
;
5686 BT_ERR("Ignoring invalid connection parameters");
5690 min
= le16_to_cpu(param
->min_interval
);
5691 max
= le16_to_cpu(param
->max_interval
);
5692 latency
= le16_to_cpu(param
->latency
);
5693 timeout
= le16_to_cpu(param
->timeout
);
5695 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5696 min
, max
, latency
, timeout
);
5698 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
5699 BT_ERR("Ignoring invalid connection parameters");
5703 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
5706 BT_ERR("Failed to add connection parameters");
5710 hci_param
->conn_min_interval
= min
;
5711 hci_param
->conn_max_interval
= max
;
5712 hci_param
->conn_latency
= latency
;
5713 hci_param
->supervision_timeout
= timeout
;
5716 hci_dev_unlock(hdev
);
5718 return cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0, NULL
, 0);
5721 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
5722 void *data
, u16 len
)
5724 struct mgmt_cp_set_external_config
*cp
= data
;
5728 BT_DBG("%s", hdev
->name
);
5730 if (hdev_is_powered(hdev
))
5731 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5732 MGMT_STATUS_REJECTED
);
5734 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
5735 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5736 MGMT_STATUS_INVALID_PARAMS
);
5738 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
5739 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5740 MGMT_STATUS_NOT_SUPPORTED
);
5745 changed
= !test_and_set_bit(HCI_EXT_CONFIGURED
,
5748 changed
= test_and_clear_bit(HCI_EXT_CONFIGURED
,
5751 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
5758 err
= new_options(hdev
, sk
);
5760 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) == is_configured(hdev
)) {
5761 mgmt_index_removed(hdev
);
5763 if (test_and_change_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
5764 set_bit(HCI_CONFIG
, &hdev
->dev_flags
);
5765 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
5767 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
5769 set_bit(HCI_RAW
, &hdev
->flags
);
5770 mgmt_index_added(hdev
);
5775 hci_dev_unlock(hdev
);
5779 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
5780 void *data
, u16 len
)
5782 struct mgmt_cp_set_public_address
*cp
= data
;
5786 BT_DBG("%s", hdev
->name
);
5788 if (hdev_is_powered(hdev
))
5789 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5790 MGMT_STATUS_REJECTED
);
5792 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
5793 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5794 MGMT_STATUS_INVALID_PARAMS
);
5796 if (!hdev
->set_bdaddr
)
5797 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5798 MGMT_STATUS_NOT_SUPPORTED
);
5802 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
5803 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
5805 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
5812 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
5813 err
= new_options(hdev
, sk
);
5815 if (is_configured(hdev
)) {
5816 mgmt_index_removed(hdev
);
5818 clear_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
);
5820 set_bit(HCI_CONFIG
, &hdev
->dev_flags
);
5821 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
5823 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
5827 hci_dev_unlock(hdev
);
5831 static const struct mgmt_handler
{
5832 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5836 } mgmt_handlers
[] = {
5837 { NULL
}, /* 0x0000 (no command) */
5838 { read_version
, false, MGMT_READ_VERSION_SIZE
},
5839 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
5840 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
5841 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
5842 { set_powered
, false, MGMT_SETTING_SIZE
},
5843 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
5844 { set_connectable
, false, MGMT_SETTING_SIZE
},
5845 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
5846 { set_bondable
, false, MGMT_SETTING_SIZE
},
5847 { set_link_security
, false, MGMT_SETTING_SIZE
},
5848 { set_ssp
, false, MGMT_SETTING_SIZE
},
5849 { set_hs
, false, MGMT_SETTING_SIZE
},
5850 { set_le
, false, MGMT_SETTING_SIZE
},
5851 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
5852 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
5853 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
5854 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
5855 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
5856 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
5857 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
5858 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
5859 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
5860 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
5861 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
5862 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
5863 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
5864 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
5865 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
5866 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
5867 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
5868 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
5869 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
5870 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
5871 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
5872 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
5873 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
5874 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
5875 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
5876 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
5877 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
5878 { set_advertising
, false, MGMT_SETTING_SIZE
},
5879 { set_bredr
, false, MGMT_SETTING_SIZE
},
5880 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
5881 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
5882 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
5883 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
5884 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
5885 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
5886 { get_conn_info
, false, MGMT_GET_CONN_INFO_SIZE
},
5887 { get_clock_info
, false, MGMT_GET_CLOCK_INFO_SIZE
},
5888 { add_device
, false, MGMT_ADD_DEVICE_SIZE
},
5889 { remove_device
, false, MGMT_REMOVE_DEVICE_SIZE
},
5890 { load_conn_param
, true, MGMT_LOAD_CONN_PARAM_SIZE
},
5891 { read_unconf_index_list
, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE
},
5892 { read_config_info
, false, MGMT_READ_CONFIG_INFO_SIZE
},
5893 { set_external_config
, false, MGMT_SET_EXTERNAL_CONFIG_SIZE
},
5894 { set_public_address
, false, MGMT_SET_PUBLIC_ADDRESS_SIZE
},
5895 { start_service_discovery
,true, MGMT_START_SERVICE_DISCOVERY_SIZE
},
5898 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
5902 struct mgmt_hdr
*hdr
;
5903 u16 opcode
, index
, len
;
5904 struct hci_dev
*hdev
= NULL
;
5905 const struct mgmt_handler
*handler
;
5908 BT_DBG("got %zu bytes", msglen
);
5910 if (msglen
< sizeof(*hdr
))
5913 buf
= kmalloc(msglen
, GFP_KERNEL
);
5917 if (memcpy_from_msg(buf
, msg
, msglen
)) {
5923 opcode
= __le16_to_cpu(hdr
->opcode
);
5924 index
= __le16_to_cpu(hdr
->index
);
5925 len
= __le16_to_cpu(hdr
->len
);
5927 if (len
!= msglen
- sizeof(*hdr
)) {
5932 if (index
!= MGMT_INDEX_NONE
) {
5933 hdev
= hci_dev_get(index
);
5935 err
= cmd_status(sk
, index
, opcode
,
5936 MGMT_STATUS_INVALID_INDEX
);
5940 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
5941 test_bit(HCI_CONFIG
, &hdev
->dev_flags
) ||
5942 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
5943 err
= cmd_status(sk
, index
, opcode
,
5944 MGMT_STATUS_INVALID_INDEX
);
5948 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
5949 opcode
!= MGMT_OP_READ_CONFIG_INFO
&&
5950 opcode
!= MGMT_OP_SET_EXTERNAL_CONFIG
&&
5951 opcode
!= MGMT_OP_SET_PUBLIC_ADDRESS
) {
5952 err
= cmd_status(sk
, index
, opcode
,
5953 MGMT_STATUS_INVALID_INDEX
);
5958 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
5959 mgmt_handlers
[opcode
].func
== NULL
) {
5960 BT_DBG("Unknown op %u", opcode
);
5961 err
= cmd_status(sk
, index
, opcode
,
5962 MGMT_STATUS_UNKNOWN_COMMAND
);
5966 if (hdev
&& (opcode
<= MGMT_OP_READ_INDEX_LIST
||
5967 opcode
== MGMT_OP_READ_UNCONF_INDEX_LIST
)) {
5968 err
= cmd_status(sk
, index
, opcode
,
5969 MGMT_STATUS_INVALID_INDEX
);
5973 if (!hdev
&& (opcode
> MGMT_OP_READ_INDEX_LIST
&&
5974 opcode
!= MGMT_OP_READ_UNCONF_INDEX_LIST
)) {
5975 err
= cmd_status(sk
, index
, opcode
,
5976 MGMT_STATUS_INVALID_INDEX
);
5980 handler
= &mgmt_handlers
[opcode
];
5982 if ((handler
->var_len
&& len
< handler
->data_len
) ||
5983 (!handler
->var_len
&& len
!= handler
->data_len
)) {
5984 err
= cmd_status(sk
, index
, opcode
,
5985 MGMT_STATUS_INVALID_PARAMS
);
5990 mgmt_init_hdev(sk
, hdev
);
5992 cp
= buf
+ sizeof(*hdr
);
5994 err
= handler
->func(sk
, hdev
, cp
, len
);
6008 void mgmt_index_added(struct hci_dev
*hdev
)
6010 if (hdev
->dev_type
!= HCI_BREDR
)
6013 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6016 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
6017 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
6019 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
6022 void mgmt_index_removed(struct hci_dev
*hdev
)
6024 u8 status
= MGMT_STATUS_INVALID_INDEX
;
6026 if (hdev
->dev_type
!= HCI_BREDR
)
6029 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6032 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
6034 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
6035 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
6037 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
6040 /* This function requires the caller holds hdev->lock */
6041 static void restart_le_actions(struct hci_dev
*hdev
)
6043 struct hci_conn_params
*p
;
6045 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
6046 /* Needed for AUTO_OFF case where might not "really"
6047 * have been powered off.
6049 list_del_init(&p
->action
);
6051 switch (p
->auto_connect
) {
6052 case HCI_AUTO_CONN_DIRECT
:
6053 case HCI_AUTO_CONN_ALWAYS
:
6054 list_add(&p
->action
, &hdev
->pend_le_conns
);
6056 case HCI_AUTO_CONN_REPORT
:
6057 list_add(&p
->action
, &hdev
->pend_le_reports
);
6064 hci_update_background_scan(hdev
);
6067 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
6069 struct cmd_lookup match
= { NULL
, hdev
};
6071 BT_DBG("status 0x%02x", status
);
6075 restart_le_actions(hdev
);
6077 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
6079 new_settings(hdev
, match
.sk
);
6081 hci_dev_unlock(hdev
);
6087 static int powered_update_hci(struct hci_dev
*hdev
)
6089 struct hci_request req
;
6092 hci_req_init(&req
, hdev
);
6094 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
6095 !lmp_host_ssp_capable(hdev
)) {
6098 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
6101 if (bredr_sc_enabled(hdev
) && !lmp_host_sc_capable(hdev
)) {
6103 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, sizeof(sc
), &sc
);
6106 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
6107 lmp_bredr_capable(hdev
)) {
6108 struct hci_cp_write_le_host_supported cp
;
6113 /* Check first if we already have the right
6114 * host state (host features set)
6116 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
6117 cp
.simul
!= lmp_host_le_br_capable(hdev
))
6118 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
6122 if (lmp_le_capable(hdev
)) {
6123 /* Make sure the controller has a good default for
6124 * advertising data. This also applies to the case
6125 * where BR/EDR was toggled during the AUTO_OFF phase.
6127 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
6128 update_adv_data(&req
);
6129 update_scan_rsp_data(&req
);
6132 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
6133 enable_advertising(&req
);
6136 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
6137 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
6138 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
6139 sizeof(link_sec
), &link_sec
);
6141 if (lmp_bredr_capable(hdev
)) {
6142 write_fast_connectable(&req
, false);
6143 __hci_update_page_scan(&req
);
6149 return hci_req_run(&req
, powered_complete
);
6152 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
6154 struct cmd_lookup match
= { NULL
, hdev
};
6155 u8 status
, zero_cod
[] = { 0, 0, 0 };
6158 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
6162 if (powered_update_hci(hdev
) == 0)
6165 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
6170 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
6172 /* If the power off is because of hdev unregistration let
6173 * use the appropriate INVALID_INDEX status. Otherwise use
6174 * NOT_POWERED. We cover both scenarios here since later in
6175 * mgmt_index_removed() any hci_conn callbacks will have already
6176 * been triggered, potentially causing misleading DISCONNECTED
6179 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
))
6180 status
= MGMT_STATUS_INVALID_INDEX
;
6182 status
= MGMT_STATUS_NOT_POWERED
;
6184 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
6186 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
6187 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
6188 zero_cod
, sizeof(zero_cod
), NULL
);
6191 err
= new_settings(hdev
, match
.sk
);
6199 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
6201 struct pending_cmd
*cmd
;
6204 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
6208 if (err
== -ERFKILL
)
6209 status
= MGMT_STATUS_RFKILLED
;
6211 status
= MGMT_STATUS_FAILED
;
6213 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
6215 mgmt_pending_remove(cmd
);
6218 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
6220 struct hci_request req
;
6224 /* When discoverable timeout triggers, then just make sure
6225 * the limited discoverable flag is cleared. Even in the case
6226 * of a timeout triggered from general discoverable, it is
6227 * safe to unconditionally clear the flag.
6229 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
6230 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
6232 hci_req_init(&req
, hdev
);
6233 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
6234 u8 scan
= SCAN_PAGE
;
6235 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
6236 sizeof(scan
), &scan
);
6239 update_adv_data(&req
);
6240 hci_req_run(&req
, NULL
);
6242 hdev
->discov_timeout
= 0;
6244 new_settings(hdev
, NULL
);
6246 hci_dev_unlock(hdev
);
6249 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
6252 struct mgmt_ev_new_link_key ev
;
6254 memset(&ev
, 0, sizeof(ev
));
6256 ev
.store_hint
= persistent
;
6257 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
6258 ev
.key
.addr
.type
= BDADDR_BREDR
;
6259 ev
.key
.type
= key
->type
;
6260 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
6261 ev
.key
.pin_len
= key
->pin_len
;
6263 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
6266 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
6268 switch (ltk
->type
) {
6271 if (ltk
->authenticated
)
6272 return MGMT_LTK_AUTHENTICATED
;
6273 return MGMT_LTK_UNAUTHENTICATED
;
6275 if (ltk
->authenticated
)
6276 return MGMT_LTK_P256_AUTH
;
6277 return MGMT_LTK_P256_UNAUTH
;
6278 case SMP_LTK_P256_DEBUG
:
6279 return MGMT_LTK_P256_DEBUG
;
6282 return MGMT_LTK_UNAUTHENTICATED
;
6285 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
6287 struct mgmt_ev_new_long_term_key ev
;
6289 memset(&ev
, 0, sizeof(ev
));
6291 /* Devices using resolvable or non-resolvable random addresses
6292 * without providing an indentity resolving key don't require
6293 * to store long term keys. Their addresses will change the
6296 * Only when a remote device provides an identity address
6297 * make sure the long term key is stored. If the remote
6298 * identity is known, the long term keys are internally
6299 * mapped to the identity address. So allow static random
6300 * and public addresses here.
6302 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6303 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6304 ev
.store_hint
= 0x00;
6306 ev
.store_hint
= persistent
;
6308 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
6309 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
6310 ev
.key
.type
= mgmt_ltk_type(key
);
6311 ev
.key
.enc_size
= key
->enc_size
;
6312 ev
.key
.ediv
= key
->ediv
;
6313 ev
.key
.rand
= key
->rand
;
6315 if (key
->type
== SMP_LTK
)
6318 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
6320 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
6323 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
6325 struct mgmt_ev_new_irk ev
;
6327 memset(&ev
, 0, sizeof(ev
));
6329 /* For identity resolving keys from devices that are already
6330 * using a public address or static random address, do not
6331 * ask for storing this key. The identity resolving key really
6332 * is only mandatory for devices using resovlable random
6335 * Storing all identity resolving keys has the downside that
6336 * they will be also loaded on next boot of they system. More
6337 * identity resolving keys, means more time during scanning is
6338 * needed to actually resolve these addresses.
6340 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
6341 ev
.store_hint
= 0x01;
6343 ev
.store_hint
= 0x00;
6345 bacpy(&ev
.rpa
, &irk
->rpa
);
6346 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
6347 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
6348 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
6350 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6353 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
6356 struct mgmt_ev_new_csrk ev
;
6358 memset(&ev
, 0, sizeof(ev
));
6360 /* Devices using resolvable or non-resolvable random addresses
6361 * without providing an indentity resolving key don't require
6362 * to store signature resolving keys. Their addresses will change
6363 * the next time around.
6365 * Only when a remote device provides an identity address
6366 * make sure the signature resolving key is stored. So allow
6367 * static random and public addresses here.
6369 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6370 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6371 ev
.store_hint
= 0x00;
6373 ev
.store_hint
= persistent
;
6375 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
6376 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
6377 ev
.key
.master
= csrk
->master
;
6378 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
6380 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6383 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6384 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
6385 u16 max_interval
, u16 latency
, u16 timeout
)
6387 struct mgmt_ev_new_conn_param ev
;
6389 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
6392 memset(&ev
, 0, sizeof(ev
));
6393 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6394 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
6395 ev
.store_hint
= store_hint
;
6396 ev
.min_interval
= cpu_to_le16(min_interval
);
6397 ev
.max_interval
= cpu_to_le16(max_interval
);
6398 ev
.latency
= cpu_to_le16(latency
);
6399 ev
.timeout
= cpu_to_le16(timeout
);
6401 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
6404 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
6407 eir
[eir_len
++] = sizeof(type
) + data_len
;
6408 eir
[eir_len
++] = type
;
6409 memcpy(&eir
[eir_len
], data
, data_len
);
6410 eir_len
+= data_len
;
6415 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
6416 u32 flags
, u8
*name
, u8 name_len
)
6419 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
6422 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
6423 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
6425 ev
->flags
= __cpu_to_le32(flags
);
6427 /* We must ensure that the EIR Data fields are ordered and
6428 * unique. Keep it simple for now and avoid the problem by not
6429 * adding any BR/EDR data to the LE adv.
6431 if (conn
->le_adv_data_len
> 0) {
6432 memcpy(&ev
->eir
[eir_len
],
6433 conn
->le_adv_data
, conn
->le_adv_data_len
);
6434 eir_len
= conn
->le_adv_data_len
;
6437 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
6440 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
6441 eir_len
= eir_append_data(ev
->eir
, eir_len
,
6443 conn
->dev_class
, 3);
6446 ev
->eir_len
= cpu_to_le16(eir_len
);
6448 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
6449 sizeof(*ev
) + eir_len
, NULL
);
6452 static void disconnect_rsp(struct pending_cmd
*cmd
, void *data
)
6454 struct sock
**sk
= data
;
6456 cmd
->cmd_complete(cmd
, 0);
6461 mgmt_pending_remove(cmd
);
6464 static void unpair_device_rsp(struct pending_cmd
*cmd
, void *data
)
6466 struct hci_dev
*hdev
= data
;
6467 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
6469 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
6471 cmd
->cmd_complete(cmd
, 0);
6472 mgmt_pending_remove(cmd
);
6475 bool mgmt_powering_down(struct hci_dev
*hdev
)
6477 struct pending_cmd
*cmd
;
6478 struct mgmt_mode
*cp
;
6480 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
6491 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6492 u8 link_type
, u8 addr_type
, u8 reason
,
6493 bool mgmt_connected
)
6495 struct mgmt_ev_device_disconnected ev
;
6496 struct sock
*sk
= NULL
;
6498 /* The connection is still in hci_conn_hash so test for 1
6499 * instead of 0 to know if this is the last one.
6501 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
6502 cancel_delayed_work(&hdev
->power_off
);
6503 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6506 if (!mgmt_connected
)
6509 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
6512 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
6514 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6515 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6518 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
6523 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6527 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6528 u8 link_type
, u8 addr_type
, u8 status
)
6530 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
6531 struct mgmt_cp_disconnect
*cp
;
6532 struct pending_cmd
*cmd
;
6534 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6537 cmd
= mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
);
6543 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
6546 if (cp
->addr
.type
!= bdaddr_type
)
6549 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6550 mgmt_pending_remove(cmd
);
6553 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6554 u8 addr_type
, u8 status
)
6556 struct mgmt_ev_connect_failed ev
;
6558 /* The connection is still in hci_conn_hash so test for 1
6559 * instead of 0 to know if this is the last one.
6561 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
6562 cancel_delayed_work(&hdev
->power_off
);
6563 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6566 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6567 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6568 ev
.status
= mgmt_status(status
);
6570 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
6573 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
6575 struct mgmt_ev_pin_code_request ev
;
6577 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6578 ev
.addr
.type
= BDADDR_BREDR
;
6581 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
6584 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6587 struct pending_cmd
*cmd
;
6589 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
6593 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6594 mgmt_pending_remove(cmd
);
6597 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6600 struct pending_cmd
*cmd
;
6602 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
6606 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6607 mgmt_pending_remove(cmd
);
6610 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6611 u8 link_type
, u8 addr_type
, u32 value
,
6614 struct mgmt_ev_user_confirm_request ev
;
6616 BT_DBG("%s", hdev
->name
);
6618 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6619 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6620 ev
.confirm_hint
= confirm_hint
;
6621 ev
.value
= cpu_to_le32(value
);
6623 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
6627 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6628 u8 link_type
, u8 addr_type
)
6630 struct mgmt_ev_user_passkey_request ev
;
6632 BT_DBG("%s", hdev
->name
);
6634 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6635 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6637 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
6641 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6642 u8 link_type
, u8 addr_type
, u8 status
,
6645 struct pending_cmd
*cmd
;
6647 cmd
= mgmt_pending_find(opcode
, hdev
);
6651 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6652 mgmt_pending_remove(cmd
);
6657 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6658 u8 link_type
, u8 addr_type
, u8 status
)
6660 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6661 status
, MGMT_OP_USER_CONFIRM_REPLY
);
6664 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6665 u8 link_type
, u8 addr_type
, u8 status
)
6667 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6669 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
6672 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6673 u8 link_type
, u8 addr_type
, u8 status
)
6675 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6676 status
, MGMT_OP_USER_PASSKEY_REPLY
);
6679 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6680 u8 link_type
, u8 addr_type
, u8 status
)
6682 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6684 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
6687 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6688 u8 link_type
, u8 addr_type
, u32 passkey
,
6691 struct mgmt_ev_passkey_notify ev
;
6693 BT_DBG("%s", hdev
->name
);
6695 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6696 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6697 ev
.passkey
= __cpu_to_le32(passkey
);
6698 ev
.entered
= entered
;
6700 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
6703 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
6705 struct mgmt_ev_auth_failed ev
;
6706 struct pending_cmd
*cmd
;
6707 u8 status
= mgmt_status(hci_status
);
6709 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
6710 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
6713 cmd
= find_pairing(conn
);
6715 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
6716 cmd
? cmd
->sk
: NULL
);
6719 cmd
->cmd_complete(cmd
, status
);
6720 mgmt_pending_remove(cmd
);
6724 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
6726 struct cmd_lookup match
= { NULL
, hdev
};
6730 u8 mgmt_err
= mgmt_status(status
);
6731 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
6732 cmd_status_rsp
, &mgmt_err
);
6736 if (test_bit(HCI_AUTH
, &hdev
->flags
))
6737 changed
= !test_and_set_bit(HCI_LINK_SECURITY
,
6740 changed
= test_and_clear_bit(HCI_LINK_SECURITY
,
6743 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
6747 new_settings(hdev
, match
.sk
);
6753 static void clear_eir(struct hci_request
*req
)
6755 struct hci_dev
*hdev
= req
->hdev
;
6756 struct hci_cp_write_eir cp
;
6758 if (!lmp_ext_inq_capable(hdev
))
6761 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
6763 memset(&cp
, 0, sizeof(cp
));
6765 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
6768 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6770 struct cmd_lookup match
= { NULL
, hdev
};
6771 struct hci_request req
;
6772 bool changed
= false;
6775 u8 mgmt_err
= mgmt_status(status
);
6777 if (enable
&& test_and_clear_bit(HCI_SSP_ENABLED
,
6778 &hdev
->dev_flags
)) {
6779 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6780 new_settings(hdev
, NULL
);
6783 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
6789 changed
= !test_and_set_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6791 changed
= test_and_clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6793 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
6796 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6799 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
6802 new_settings(hdev
, match
.sk
);
6807 hci_req_init(&req
, hdev
);
6809 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
6810 if (test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
6811 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
6812 sizeof(enable
), &enable
);
6818 hci_req_run(&req
, NULL
);
6821 void mgmt_sc_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6823 struct cmd_lookup match
= { NULL
, hdev
};
6824 bool changed
= false;
6827 u8 mgmt_err
= mgmt_status(status
);
6830 if (test_and_clear_bit(HCI_SC_ENABLED
,
6832 new_settings(hdev
, NULL
);
6833 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6836 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6837 cmd_status_rsp
, &mgmt_err
);
6842 changed
= !test_and_set_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6844 changed
= test_and_clear_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6845 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6848 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6849 settings_rsp
, &match
);
6852 new_settings(hdev
, match
.sk
);
6858 static void sk_lookup(struct pending_cmd
*cmd
, void *data
)
6860 struct cmd_lookup
*match
= data
;
6862 if (match
->sk
== NULL
) {
6863 match
->sk
= cmd
->sk
;
6864 sock_hold(match
->sk
);
6868 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
6871 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
6873 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
6874 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
6875 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
6878 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
, 3,
6885 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
6887 struct mgmt_cp_set_local_name ev
;
6888 struct pending_cmd
*cmd
;
6893 memset(&ev
, 0, sizeof(ev
));
6894 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
6895 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
6897 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
6899 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
6901 /* If this is a HCI command related to powering on the
6902 * HCI dev don't send any mgmt signals.
6904 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
6908 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
6909 cmd
? cmd
->sk
: NULL
);
6912 void mgmt_read_local_oob_data_complete(struct hci_dev
*hdev
, u8
*hash192
,
6913 u8
*rand192
, u8
*hash256
, u8
*rand256
,
6916 struct pending_cmd
*cmd
;
6918 BT_DBG("%s status %u", hdev
->name
, status
);
6920 cmd
= mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
6925 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
6926 mgmt_status(status
));
6928 if (bredr_sc_enabled(hdev
) && hash256
&& rand256
) {
6929 struct mgmt_rp_read_local_oob_ext_data rp
;
6931 memcpy(rp
.hash192
, hash192
, sizeof(rp
.hash192
));
6932 memcpy(rp
.rand192
, rand192
, sizeof(rp
.rand192
));
6934 memcpy(rp
.hash256
, hash256
, sizeof(rp
.hash256
));
6935 memcpy(rp
.rand256
, rand256
, sizeof(rp
.rand256
));
6937 cmd_complete(cmd
->sk
, hdev
->id
,
6938 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6941 struct mgmt_rp_read_local_oob_data rp
;
6943 memcpy(rp
.hash
, hash192
, sizeof(rp
.hash
));
6944 memcpy(rp
.rand
, rand192
, sizeof(rp
.rand
));
6946 cmd_complete(cmd
->sk
, hdev
->id
,
6947 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6952 mgmt_pending_remove(cmd
);
6955 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
6959 for (i
= 0; i
< uuid_count
; i
++) {
6960 if (!memcmp(uuid
, uuids
[i
], 16))
/* Check whether any UUID from the service-discovery filter list @uuids
 * (@uuid_count entries of 16 bytes) appears in the EIR/advertising data
 * @eir of length @eir_len.
 *
 * The data is walked as standard length-prefixed EIR fields: eir[0] is
 * the field length (covering the type byte plus payload), eir[1] the
 * field type. 16- and 32-bit UUIDs are expanded to full 128-bit form by
 * overlaying their bytes onto the Bluetooth Base UUID before comparing.
 *
 * Returns true on the first match, false if the data is exhausted or a
 * zero/truncated field terminates parsing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count,
			  u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero field length terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a field that claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Each 16-bit UUID is stored little-endian; place it
			 * into bytes 12-13 of the base UUID (big-endian UUID
			 * layout) before matching.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy bytes 12-15 of the base UUID. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
/* Emit a MGMT_EV_DEVICE_FOUND event for a device seen during inquiry or
 * LE scanning.
 *
 * The event is suppressed when discovery was not kernel-initiated (with
 * the exception of LE passive scanning driven by pend_le_reports), when
 * the result falls below a configured RSSI threshold, or — during
 * service discovery with a UUID filter — when neither the EIR/advertising
 * data nor the scan response contains a matching UUID.
 *
 * The on-stack buffer holds the fixed event header plus EIR data, scan
 * response data and up to 5 extra bytes for a synthesized Class of
 * Device field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, then all results with a RSSI smaller than the
	 * RSSI threshold will be dropped.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0)
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
		else
			match = true;

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	/* Synthesize a Class of Device field from dev_class when the EIR
	 * data does not already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
/* Emit a MGMT_EV_DEVICE_FOUND event carrying the remote device's name.
 *
 * Used when a name resolution completes after the device was already
 * reported; the name is delivered as an EIR_NAME_COMPLETE field inside
 * the event's EIR data. The buffer reserves HCI_MAX_NAME_LENGTH plus 2
 * bytes for the EIR field's length and type header.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* Encode the name as a complete-name EIR field at offset 0. */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	/* Only the used portion of the buffer is sent. */
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len,
		   NULL);
}
7165 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
7167 struct mgmt_ev_discovering ev
;
7169 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
7171 memset(&ev
, 0, sizeof(ev
));
7172 ev
.type
= hdev
->discovery
.type
;
7173 ev
.discovering
= discovering
;
7175 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Completion callback for the advertising re-enable request issued by
 * mgmt_reenable_advertising(). Only logs the HCI status; no recovery is
 * attempted on failure.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
/* Re-enable LE advertising on @hdev if it is configured to advertise.
 *
 * Called when advertising needs to be restored (e.g. after it was
 * implicitly stopped by the controller). Does nothing when the
 * HCI_ADVERTISING flag is not set. The request runs asynchronously;
 * completion is only logged via adv_enable_complete().
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}