2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 13
43 static const u16 mgmt_commands
[] = {
44 MGMT_OP_READ_INDEX_LIST
,
47 MGMT_OP_SET_DISCOVERABLE
,
48 MGMT_OP_SET_CONNECTABLE
,
49 MGMT_OP_SET_FAST_CONNECTABLE
,
51 MGMT_OP_SET_LINK_SECURITY
,
55 MGMT_OP_SET_DEV_CLASS
,
56 MGMT_OP_SET_LOCAL_NAME
,
59 MGMT_OP_LOAD_LINK_KEYS
,
60 MGMT_OP_LOAD_LONG_TERM_KEYS
,
62 MGMT_OP_GET_CONNECTIONS
,
63 MGMT_OP_PIN_CODE_REPLY
,
64 MGMT_OP_PIN_CODE_NEG_REPLY
,
65 MGMT_OP_SET_IO_CAPABILITY
,
67 MGMT_OP_CANCEL_PAIR_DEVICE
,
68 MGMT_OP_UNPAIR_DEVICE
,
69 MGMT_OP_USER_CONFIRM_REPLY
,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
71 MGMT_OP_USER_PASSKEY_REPLY
,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
73 MGMT_OP_READ_LOCAL_OOB_DATA
,
74 MGMT_OP_ADD_REMOTE_OOB_DATA
,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
76 MGMT_OP_START_DISCOVERY
,
77 MGMT_OP_STOP_DISCOVERY
,
80 MGMT_OP_UNBLOCK_DEVICE
,
81 MGMT_OP_SET_DEVICE_ID
,
82 MGMT_OP_SET_ADVERTISING
,
84 MGMT_OP_SET_STATIC_ADDRESS
,
85 MGMT_OP_SET_SCAN_PARAMS
,
86 MGMT_OP_SET_SECURE_CONN
,
87 MGMT_OP_SET_DEBUG_KEYS
,
90 MGMT_OP_GET_CONN_INFO
,
91 MGMT_OP_GET_CLOCK_INFO
,
93 MGMT_OP_REMOVE_DEVICE
,
94 MGMT_OP_LOAD_CONN_PARAM
,
95 MGMT_OP_READ_UNCONF_INDEX_LIST
,
96 MGMT_OP_READ_CONFIG_INFO
,
97 MGMT_OP_SET_EXTERNAL_CONFIG
,
98 MGMT_OP_SET_PUBLIC_ADDRESS
,
99 MGMT_OP_START_SERVICE_DISCOVERY
,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
101 MGMT_OP_READ_EXT_INDEX_LIST
,
102 MGMT_OP_READ_ADV_FEATURES
,
103 MGMT_OP_ADD_ADVERTISING
,
104 MGMT_OP_REMOVE_ADVERTISING
,
105 MGMT_OP_GET_ADV_SIZE_INFO
,
106 MGMT_OP_START_LIMITED_DISCOVERY
,
107 MGMT_OP_READ_EXT_INFO
,
110 static const u16 mgmt_events
[] = {
111 MGMT_EV_CONTROLLER_ERROR
,
113 MGMT_EV_INDEX_REMOVED
,
114 MGMT_EV_NEW_SETTINGS
,
115 MGMT_EV_CLASS_OF_DEV_CHANGED
,
116 MGMT_EV_LOCAL_NAME_CHANGED
,
117 MGMT_EV_NEW_LINK_KEY
,
118 MGMT_EV_NEW_LONG_TERM_KEY
,
119 MGMT_EV_DEVICE_CONNECTED
,
120 MGMT_EV_DEVICE_DISCONNECTED
,
121 MGMT_EV_CONNECT_FAILED
,
122 MGMT_EV_PIN_CODE_REQUEST
,
123 MGMT_EV_USER_CONFIRM_REQUEST
,
124 MGMT_EV_USER_PASSKEY_REQUEST
,
126 MGMT_EV_DEVICE_FOUND
,
128 MGMT_EV_DEVICE_BLOCKED
,
129 MGMT_EV_DEVICE_UNBLOCKED
,
130 MGMT_EV_DEVICE_UNPAIRED
,
131 MGMT_EV_PASSKEY_NOTIFY
,
134 MGMT_EV_DEVICE_ADDED
,
135 MGMT_EV_DEVICE_REMOVED
,
136 MGMT_EV_NEW_CONN_PARAM
,
137 MGMT_EV_UNCONF_INDEX_ADDED
,
138 MGMT_EV_UNCONF_INDEX_REMOVED
,
139 MGMT_EV_NEW_CONFIG_OPTIONS
,
140 MGMT_EV_EXT_INDEX_ADDED
,
141 MGMT_EV_EXT_INDEX_REMOVED
,
142 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
143 MGMT_EV_ADVERTISING_ADDED
,
144 MGMT_EV_ADVERTISING_REMOVED
,
145 MGMT_EV_EXT_INFO_CHANGED
,
148 static const u16 mgmt_untrusted_commands
[] = {
149 MGMT_OP_READ_INDEX_LIST
,
151 MGMT_OP_READ_UNCONF_INDEX_LIST
,
152 MGMT_OP_READ_CONFIG_INFO
,
153 MGMT_OP_READ_EXT_INDEX_LIST
,
154 MGMT_OP_READ_EXT_INFO
,
157 static const u16 mgmt_untrusted_events
[] = {
159 MGMT_EV_INDEX_REMOVED
,
160 MGMT_EV_NEW_SETTINGS
,
161 MGMT_EV_CLASS_OF_DEV_CHANGED
,
162 MGMT_EV_LOCAL_NAME_CHANGED
,
163 MGMT_EV_UNCONF_INDEX_ADDED
,
164 MGMT_EV_UNCONF_INDEX_REMOVED
,
165 MGMT_EV_NEW_CONFIG_OPTIONS
,
166 MGMT_EV_EXT_INDEX_ADDED
,
167 MGMT_EV_EXT_INDEX_REMOVED
,
168 MGMT_EV_EXT_INFO_CHANGED
,
171 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
173 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
174 "\x00\x00\x00\x00\x00\x00\x00\x00"
176 /* HCI to MGMT error code conversion table */
177 static u8 mgmt_status_table
[] = {
179 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
180 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
181 MGMT_STATUS_FAILED
, /* Hardware Failure */
182 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
183 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
184 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
185 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
186 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
187 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
188 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
189 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
190 MGMT_STATUS_BUSY
, /* Command Disallowed */
191 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
192 MGMT_STATUS_REJECTED
, /* Rejected Security */
193 MGMT_STATUS_REJECTED
, /* Rejected Personal */
194 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
195 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
196 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
197 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
198 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
199 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
200 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
201 MGMT_STATUS_BUSY
, /* Repeated Attempts */
202 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
203 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
204 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
205 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
206 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
207 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
208 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
209 MGMT_STATUS_FAILED
, /* Unspecified Error */
210 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
211 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
212 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
213 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
214 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
215 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
216 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
217 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
218 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
219 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
220 MGMT_STATUS_FAILED
, /* Transaction Collision */
221 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
222 MGMT_STATUS_REJECTED
, /* QoS Rejected */
223 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
224 MGMT_STATUS_REJECTED
, /* Insufficient Security */
225 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
226 MGMT_STATUS_BUSY
, /* Role Switch Pending */
227 MGMT_STATUS_FAILED
, /* Slot Violation */
228 MGMT_STATUS_FAILED
, /* Role Switch Failed */
229 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
230 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
231 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
232 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
233 MGMT_STATUS_BUSY
, /* Controller Busy */
234 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
235 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
236 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
237 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
238 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
241 static u8
mgmt_status(u8 hci_status
)
243 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
244 return mgmt_status_table
[hci_status
];
246 return MGMT_STATUS_FAILED
;
249 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
252 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
256 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
257 u16 len
, int flag
, struct sock
*skip_sk
)
259 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
263 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 len
,
264 struct sock
*skip_sk
)
266 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
267 HCI_SOCK_TRUSTED
, skip_sk
);
270 static u8
le_addr_type(u8 mgmt_addr_type
)
272 if (mgmt_addr_type
== BDADDR_LE_PUBLIC
)
273 return ADDR_LE_DEV_PUBLIC
;
275 return ADDR_LE_DEV_RANDOM
;
278 void mgmt_fill_version_info(void *ver
)
280 struct mgmt_rp_read_version
*rp
= ver
;
282 rp
->version
= MGMT_VERSION
;
283 rp
->revision
= cpu_to_le16(MGMT_REVISION
);
286 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
289 struct mgmt_rp_read_version rp
;
291 BT_DBG("sock %p", sk
);
293 mgmt_fill_version_info(&rp
);
295 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
299 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
302 struct mgmt_rp_read_commands
*rp
;
303 u16 num_commands
, num_events
;
307 BT_DBG("sock %p", sk
);
309 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
310 num_commands
= ARRAY_SIZE(mgmt_commands
);
311 num_events
= ARRAY_SIZE(mgmt_events
);
313 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
314 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
317 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
319 rp
= kmalloc(rp_size
, GFP_KERNEL
);
323 rp
->num_commands
= cpu_to_le16(num_commands
);
324 rp
->num_events
= cpu_to_le16(num_events
);
326 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
327 __le16
*opcode
= rp
->opcodes
;
329 for (i
= 0; i
< num_commands
; i
++, opcode
++)
330 put_unaligned_le16(mgmt_commands
[i
], opcode
);
332 for (i
= 0; i
< num_events
; i
++, opcode
++)
333 put_unaligned_le16(mgmt_events
[i
], opcode
);
335 __le16
*opcode
= rp
->opcodes
;
337 for (i
= 0; i
< num_commands
; i
++, opcode
++)
338 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
340 for (i
= 0; i
< num_events
; i
++, opcode
++)
341 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
344 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
351 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
354 struct mgmt_rp_read_index_list
*rp
;
360 BT_DBG("sock %p", sk
);
362 read_lock(&hci_dev_list_lock
);
365 list_for_each_entry(d
, &hci_dev_list
, list
) {
366 if (d
->dev_type
== HCI_PRIMARY
&&
367 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
371 rp_len
= sizeof(*rp
) + (2 * count
);
372 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
374 read_unlock(&hci_dev_list_lock
);
379 list_for_each_entry(d
, &hci_dev_list
, list
) {
380 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
381 hci_dev_test_flag(d
, HCI_CONFIG
) ||
382 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
385 /* Devices marked as raw-only are neither configured
386 * nor unconfigured controllers.
388 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
391 if (d
->dev_type
== HCI_PRIMARY
&&
392 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
393 rp
->index
[count
++] = cpu_to_le16(d
->id
);
394 BT_DBG("Added hci%u", d
->id
);
398 rp
->num_controllers
= cpu_to_le16(count
);
399 rp_len
= sizeof(*rp
) + (2 * count
);
401 read_unlock(&hci_dev_list_lock
);
403 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
411 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
412 void *data
, u16 data_len
)
414 struct mgmt_rp_read_unconf_index_list
*rp
;
420 BT_DBG("sock %p", sk
);
422 read_lock(&hci_dev_list_lock
);
425 list_for_each_entry(d
, &hci_dev_list
, list
) {
426 if (d
->dev_type
== HCI_PRIMARY
&&
427 hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
431 rp_len
= sizeof(*rp
) + (2 * count
);
432 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
434 read_unlock(&hci_dev_list_lock
);
439 list_for_each_entry(d
, &hci_dev_list
, list
) {
440 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
441 hci_dev_test_flag(d
, HCI_CONFIG
) ||
442 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
445 /* Devices marked as raw-only are neither configured
446 * nor unconfigured controllers.
448 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
451 if (d
->dev_type
== HCI_PRIMARY
&&
452 hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
453 rp
->index
[count
++] = cpu_to_le16(d
->id
);
454 BT_DBG("Added hci%u", d
->id
);
458 rp
->num_controllers
= cpu_to_le16(count
);
459 rp_len
= sizeof(*rp
) + (2 * count
);
461 read_unlock(&hci_dev_list_lock
);
463 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
464 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
471 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
472 void *data
, u16 data_len
)
474 struct mgmt_rp_read_ext_index_list
*rp
;
480 BT_DBG("sock %p", sk
);
482 read_lock(&hci_dev_list_lock
);
485 list_for_each_entry(d
, &hci_dev_list
, list
) {
486 if (d
->dev_type
== HCI_PRIMARY
|| d
->dev_type
== HCI_AMP
)
490 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
491 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
493 read_unlock(&hci_dev_list_lock
);
498 list_for_each_entry(d
, &hci_dev_list
, list
) {
499 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
500 hci_dev_test_flag(d
, HCI_CONFIG
) ||
501 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
504 /* Devices marked as raw-only are neither configured
505 * nor unconfigured controllers.
507 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
510 if (d
->dev_type
== HCI_PRIMARY
) {
511 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
512 rp
->entry
[count
].type
= 0x01;
514 rp
->entry
[count
].type
= 0x00;
515 } else if (d
->dev_type
== HCI_AMP
) {
516 rp
->entry
[count
].type
= 0x02;
521 rp
->entry
[count
].bus
= d
->bus
;
522 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
523 BT_DBG("Added hci%u", d
->id
);
526 rp
->num_controllers
= cpu_to_le16(count
);
527 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
529 read_unlock(&hci_dev_list_lock
);
531 /* If this command is called at least once, then all the
532 * default index and unconfigured index events are disabled
533 * and from now on only extended index events are used.
535 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
536 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
537 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
539 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
540 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
, rp_len
);
547 static bool is_configured(struct hci_dev
*hdev
)
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
550 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
553 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
554 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
560 static __le32
get_missing_options(struct hci_dev
*hdev
)
564 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
565 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
566 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
568 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
569 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
570 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
572 return cpu_to_le32(options
);
575 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
577 __le32 options
= get_missing_options(hdev
);
579 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
580 sizeof(options
), HCI_MGMT_OPTION_EVENTS
, skip
);
583 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
585 __le32 options
= get_missing_options(hdev
);
587 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
591 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
592 void *data
, u16 data_len
)
594 struct mgmt_rp_read_config_info rp
;
597 BT_DBG("sock %p %s", sk
, hdev
->name
);
601 memset(&rp
, 0, sizeof(rp
));
602 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
605 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
607 if (hdev
->set_bdaddr
)
608 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
610 rp
.supported_options
= cpu_to_le32(options
);
611 rp
.missing_options
= get_missing_options(hdev
);
613 hci_dev_unlock(hdev
);
615 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
619 static u32
get_supported_settings(struct hci_dev
*hdev
)
623 settings
|= MGMT_SETTING_POWERED
;
624 settings
|= MGMT_SETTING_BONDABLE
;
625 settings
|= MGMT_SETTING_DEBUG_KEYS
;
626 settings
|= MGMT_SETTING_CONNECTABLE
;
627 settings
|= MGMT_SETTING_DISCOVERABLE
;
629 if (lmp_bredr_capable(hdev
)) {
630 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
631 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
632 settings
|= MGMT_SETTING_BREDR
;
633 settings
|= MGMT_SETTING_LINK_SECURITY
;
635 if (lmp_ssp_capable(hdev
)) {
636 settings
|= MGMT_SETTING_SSP
;
637 settings
|= MGMT_SETTING_HS
;
640 if (lmp_sc_capable(hdev
))
641 settings
|= MGMT_SETTING_SECURE_CONN
;
644 if (lmp_le_capable(hdev
)) {
645 settings
|= MGMT_SETTING_LE
;
646 settings
|= MGMT_SETTING_ADVERTISING
;
647 settings
|= MGMT_SETTING_SECURE_CONN
;
648 settings
|= MGMT_SETTING_PRIVACY
;
649 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
652 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
654 settings
|= MGMT_SETTING_CONFIGURATION
;
659 static u32
get_current_settings(struct hci_dev
*hdev
)
663 if (hdev_is_powered(hdev
))
664 settings
|= MGMT_SETTING_POWERED
;
666 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
667 settings
|= MGMT_SETTING_CONNECTABLE
;
669 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
670 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
672 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
673 settings
|= MGMT_SETTING_DISCOVERABLE
;
675 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
676 settings
|= MGMT_SETTING_BONDABLE
;
678 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
679 settings
|= MGMT_SETTING_BREDR
;
681 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
682 settings
|= MGMT_SETTING_LE
;
684 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
685 settings
|= MGMT_SETTING_LINK_SECURITY
;
687 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
688 settings
|= MGMT_SETTING_SSP
;
690 if (hci_dev_test_flag(hdev
, HCI_HS_ENABLED
))
691 settings
|= MGMT_SETTING_HS
;
693 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
694 settings
|= MGMT_SETTING_ADVERTISING
;
696 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
697 settings
|= MGMT_SETTING_SECURE_CONN
;
699 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
700 settings
|= MGMT_SETTING_DEBUG_KEYS
;
702 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
703 settings
|= MGMT_SETTING_PRIVACY
;
705 /* The current setting for static address has two purposes. The
706 * first is to indicate if the static address will be used and
707 * the second is to indicate if it is actually set.
709 * This means if the static address is not configured, this flag
710 * will never be set. If the address is configured, then if the
711 * address is actually used decides if the flag is set or not.
713 * For single mode LE only controllers and dual-mode controllers
714 * with BR/EDR disabled, the existence of the static address will
717 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
718 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
719 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
720 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
721 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
727 static struct mgmt_pending_cmd
*pending_find(u16 opcode
, struct hci_dev
*hdev
)
729 return mgmt_pending_find(HCI_CHANNEL_CONTROL
, opcode
, hdev
);
732 static struct mgmt_pending_cmd
*pending_find_data(u16 opcode
,
733 struct hci_dev
*hdev
,
736 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL
, opcode
, hdev
, data
);
739 u8
mgmt_get_adv_discov_flags(struct hci_dev
*hdev
)
741 struct mgmt_pending_cmd
*cmd
;
743 /* If there's a pending mgmt command the flags will not yet have
744 * their final values, so check for this first.
746 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
748 struct mgmt_mode
*cp
= cmd
->param
;
750 return LE_AD_GENERAL
;
751 else if (cp
->val
== 0x02)
752 return LE_AD_LIMITED
;
754 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
755 return LE_AD_LIMITED
;
756 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
757 return LE_AD_GENERAL
;
763 bool mgmt_get_connectable(struct hci_dev
*hdev
)
765 struct mgmt_pending_cmd
*cmd
;
767 /* If there's a pending mgmt command the flag will not yet have
768 * it's final value, so check for this first.
770 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
772 struct mgmt_mode
*cp
= cmd
->param
;
777 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
780 static void service_cache_off(struct work_struct
*work
)
782 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
784 struct hci_request req
;
786 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
789 hci_req_init(&req
, hdev
);
793 __hci_req_update_eir(&req
);
794 __hci_req_update_class(&req
);
796 hci_dev_unlock(hdev
);
798 hci_req_run(&req
, NULL
);
801 static void rpa_expired(struct work_struct
*work
)
803 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
805 struct hci_request req
;
809 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
811 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
814 /* The generation of a new RPA and programming it into the
815 * controller happens in the hci_req_enable_advertising()
818 hci_req_init(&req
, hdev
);
819 __hci_req_enable_advertising(&req
);
820 hci_req_run(&req
, NULL
);
823 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
825 if (hci_dev_test_and_set_flag(hdev
, HCI_MGMT
))
828 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
829 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
831 /* Non-mgmt controlled devices get this bit set
832 * implicitly so that pairing works for them, however
833 * for mgmt we require user-space to explicitly enable
836 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
839 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
840 void *data
, u16 data_len
)
842 struct mgmt_rp_read_info rp
;
844 BT_DBG("sock %p %s", sk
, hdev
->name
);
848 memset(&rp
, 0, sizeof(rp
));
850 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
852 rp
.version
= hdev
->hci_ver
;
853 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
855 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
856 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
858 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
860 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
861 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
863 hci_dev_unlock(hdev
);
865 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
869 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
872 eir
[eir_len
++] = sizeof(type
) + data_len
;
873 eir
[eir_len
++] = type
;
874 memcpy(&eir
[eir_len
], data
, data_len
);
880 static int read_ext_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
881 void *data
, u16 data_len
)
883 struct mgmt_rp_read_ext_info
*rp
;
888 BT_DBG("sock %p %s", sk
, hdev
->name
);
892 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
893 eir_len
= eir_append_data(buff
, eir_len
,
897 name_len
= strlen(hdev
->dev_name
);
898 eir_len
= eir_append_data(buff
, eir_len
, EIR_NAME_COMPLETE
,
899 hdev
->dev_name
, name_len
);
901 name_len
= strlen(hdev
->short_name
);
902 eir_len
= eir_append_data(buff
, eir_len
, EIR_NAME_SHORT
,
903 hdev
->short_name
, name_len
);
905 rp
= kzalloc(sizeof(*rp
) + eir_len
, GFP_KERNEL
);
909 rp
->eir_len
= cpu_to_le16(eir_len
);
910 memcpy(rp
->eir
, buff
, eir_len
);
912 bacpy(&rp
->bdaddr
, &hdev
->bdaddr
);
914 rp
->version
= hdev
->hci_ver
;
915 rp
->manufacturer
= cpu_to_le16(hdev
->manufacturer
);
917 rp
->supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
918 rp
->current_settings
= cpu_to_le32(get_current_settings(hdev
));
920 hci_dev_unlock(hdev
);
922 /* If this command is called at least once, then the events
923 * for class of device and local name changes are disabled
924 * and only the new extended controller information event
927 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INFO_EVENTS
);
928 hci_sock_clear_flag(sk
, HCI_MGMT_DEV_CLASS_EVENTS
);
929 hci_sock_clear_flag(sk
, HCI_MGMT_LOCAL_NAME_EVENTS
);
931 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_EXT_INFO
, 0, rp
,
932 sizeof(*rp
) + eir_len
);
935 static int ext_info_changed(struct hci_dev
*hdev
, struct sock
*skip
)
937 struct mgmt_ev_ext_info_changed ev
;
939 ev
.eir_len
= cpu_to_le16(0);
941 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED
, hdev
, &ev
,
942 sizeof(ev
), HCI_MGMT_EXT_INFO_EVENTS
, skip
);
945 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
947 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
949 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
953 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
955 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
957 if (hci_conn_count(hdev
) == 0) {
958 cancel_delayed_work(&hdev
->power_off
);
959 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
963 void mgmt_advertising_added(struct sock
*sk
, struct hci_dev
*hdev
, u8 instance
)
965 struct mgmt_ev_advertising_added ev
;
967 ev
.instance
= instance
;
969 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
972 void mgmt_advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
975 struct mgmt_ev_advertising_removed ev
;
977 ev
.instance
= instance
;
979 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
982 static void cancel_adv_timeout(struct hci_dev
*hdev
)
984 if (hdev
->adv_instance_timeout
) {
985 hdev
->adv_instance_timeout
= 0;
986 cancel_delayed_work(&hdev
->adv_instance_expire
);
990 static int clean_up_hci_state(struct hci_dev
*hdev
)
992 struct hci_request req
;
993 struct hci_conn
*conn
;
997 hci_req_init(&req
, hdev
);
999 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1000 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1002 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1005 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, false);
1007 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1008 __hci_req_disable_advertising(&req
);
1010 discov_stopped
= hci_req_stop_discovery(&req
);
1012 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1013 /* 0x15 == Terminated due to Power Off */
1014 __hci_abort_conn(&req
, conn
, 0x15);
1017 err
= hci_req_run(&req
, clean_up_hci_complete
);
1018 if (!err
&& discov_stopped
)
1019 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1024 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1027 struct mgmt_mode
*cp
= data
;
1028 struct mgmt_pending_cmd
*cmd
;
1031 BT_DBG("request for %s", hdev
->name
);
1033 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1034 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1035 MGMT_STATUS_INVALID_PARAMS
);
1039 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1040 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1045 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1046 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1050 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1057 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1060 /* Disconnect connections, stop scans, etc */
1061 err
= clean_up_hci_state(hdev
);
1063 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1064 HCI_POWER_OFF_TIMEOUT
);
1066 /* ENODATA means there were no HCI commands queued */
1067 if (err
== -ENODATA
) {
1068 cancel_delayed_work(&hdev
->power_off
);
1069 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1075 hci_dev_unlock(hdev
);
1079 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1081 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1083 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1084 sizeof(ev
), HCI_MGMT_SETTING_EVENTS
, skip
);
1087 int mgmt_new_settings(struct hci_dev
*hdev
)
1089 return new_settings(hdev
, NULL
);
1094 struct hci_dev
*hdev
;
1098 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1100 struct cmd_lookup
*match
= data
;
1102 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1104 list_del(&cmd
->list
);
1106 if (match
->sk
== NULL
) {
1107 match
->sk
= cmd
->sk
;
1108 sock_hold(match
->sk
);
1111 mgmt_pending_free(cmd
);
1114 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1118 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1119 mgmt_pending_remove(cmd
);
1122 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1124 if (cmd
->cmd_complete
) {
1127 cmd
->cmd_complete(cmd
, *status
);
1128 mgmt_pending_remove(cmd
);
1133 cmd_status_rsp(cmd
, data
);
1136 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1138 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1139 cmd
->param
, cmd
->param_len
);
1142 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1144 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1145 cmd
->param
, sizeof(struct mgmt_addr_info
));
1148 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1150 if (!lmp_bredr_capable(hdev
))
1151 return MGMT_STATUS_NOT_SUPPORTED
;
1152 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1153 return MGMT_STATUS_REJECTED
;
1155 return MGMT_STATUS_SUCCESS
;
1158 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1160 if (!lmp_le_capable(hdev
))
1161 return MGMT_STATUS_NOT_SUPPORTED
;
1162 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1163 return MGMT_STATUS_REJECTED
;
1165 return MGMT_STATUS_SUCCESS
;
1168 void mgmt_set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1170 struct mgmt_pending_cmd
*cmd
;
1172 BT_DBG("status 0x%02x", status
);
1176 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1181 u8 mgmt_err
= mgmt_status(status
);
1182 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1183 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1187 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1188 hdev
->discov_timeout
> 0) {
1189 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1190 queue_delayed_work(hdev
->req_workqueue
, &hdev
->discov_off
, to
);
1193 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1194 new_settings(hdev
, cmd
->sk
);
1197 mgmt_pending_remove(cmd
);
1200 hci_dev_unlock(hdev
);
1203 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1206 struct mgmt_cp_set_discoverable
*cp
= data
;
1207 struct mgmt_pending_cmd
*cmd
;
1211 BT_DBG("request for %s", hdev
->name
);
1213 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1214 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1215 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1216 MGMT_STATUS_REJECTED
);
1218 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1219 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1220 MGMT_STATUS_INVALID_PARAMS
);
1222 timeout
= __le16_to_cpu(cp
->timeout
);
1224 /* Disabling discoverable requires that no timeout is set,
1225 * and enabling limited discoverable requires a timeout.
1227 if ((cp
->val
== 0x00 && timeout
> 0) ||
1228 (cp
->val
== 0x02 && timeout
== 0))
1229 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1230 MGMT_STATUS_INVALID_PARAMS
);
1234 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1235 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1236 MGMT_STATUS_NOT_POWERED
);
1240 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1241 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1242 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1247 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1248 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1249 MGMT_STATUS_REJECTED
);
1253 if (!hdev_is_powered(hdev
)) {
1254 bool changed
= false;
1256 /* Setting limited discoverable when powered off is
1257 * not a valid operation since it requires a timeout
1258 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1260 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1261 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1265 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1270 err
= new_settings(hdev
, sk
);
1275 /* If the current mode is the same, then just update the timeout
1276 * value with the new value. And if only the timeout gets updated,
1277 * then no need for any HCI transactions.
1279 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1280 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1281 HCI_LIMITED_DISCOVERABLE
)) {
1282 cancel_delayed_work(&hdev
->discov_off
);
1283 hdev
->discov_timeout
= timeout
;
1285 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1286 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1287 queue_delayed_work(hdev
->req_workqueue
,
1288 &hdev
->discov_off
, to
);
1291 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1295 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1301 /* Cancel any potential discoverable timeout that might be
1302 * still active and store new timeout value. The arming of
1303 * the timeout happens in the complete handler.
1305 cancel_delayed_work(&hdev
->discov_off
);
1306 hdev
->discov_timeout
= timeout
;
1309 hci_dev_set_flag(hdev
, HCI_DISCOVERABLE
);
1311 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1313 /* Limited discoverable mode */
1314 if (cp
->val
== 0x02)
1315 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1317 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1319 queue_work(hdev
->req_workqueue
, &hdev
->discoverable_update
);
1323 hci_dev_unlock(hdev
);
1327 void mgmt_set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1329 struct mgmt_pending_cmd
*cmd
;
1331 BT_DBG("status 0x%02x", status
);
1335 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1340 u8 mgmt_err
= mgmt_status(status
);
1341 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1345 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1346 new_settings(hdev
, cmd
->sk
);
1349 mgmt_pending_remove(cmd
);
1352 hci_dev_unlock(hdev
);
1355 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1356 struct sock
*sk
, u8 val
)
1358 bool changed
= false;
1361 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
1365 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1367 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1368 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1371 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1376 hci_req_update_scan(hdev
);
1377 hci_update_background_scan(hdev
);
1378 return new_settings(hdev
, sk
);
1384 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1387 struct mgmt_mode
*cp
= data
;
1388 struct mgmt_pending_cmd
*cmd
;
1391 BT_DBG("request for %s", hdev
->name
);
1393 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1394 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1395 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1396 MGMT_STATUS_REJECTED
);
1398 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1399 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1400 MGMT_STATUS_INVALID_PARAMS
);
1404 if (!hdev_is_powered(hdev
)) {
1405 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1409 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1410 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1411 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1416 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1423 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1425 if (hdev
->discov_timeout
> 0)
1426 cancel_delayed_work(&hdev
->discov_off
);
1428 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1429 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1430 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1433 queue_work(hdev
->req_workqueue
, &hdev
->connectable_update
);
1437 hci_dev_unlock(hdev
);
1441 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1444 struct mgmt_mode
*cp
= data
;
1448 BT_DBG("request for %s", hdev
->name
);
1450 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1451 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1452 MGMT_STATUS_INVALID_PARAMS
);
1457 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
1459 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
1461 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1466 /* In limited privacy mode the change of bondable mode
1467 * may affect the local advertising address.
1469 if (hdev_is_powered(hdev
) &&
1470 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
1471 hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1472 hci_dev_test_flag(hdev
, HCI_LIMITED_PRIVACY
))
1473 queue_work(hdev
->req_workqueue
,
1474 &hdev
->discoverable_update
);
1476 err
= new_settings(hdev
, sk
);
1480 hci_dev_unlock(hdev
);
1484 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1487 struct mgmt_mode
*cp
= data
;
1488 struct mgmt_pending_cmd
*cmd
;
1492 BT_DBG("request for %s", hdev
->name
);
1494 status
= mgmt_bredr_support(hdev
);
1496 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1499 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1500 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1501 MGMT_STATUS_INVALID_PARAMS
);
1505 if (!hdev_is_powered(hdev
)) {
1506 bool changed
= false;
1508 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
1509 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
1513 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1518 err
= new_settings(hdev
, sk
);
1523 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1524 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1531 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1532 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1536 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1542 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1544 mgmt_pending_remove(cmd
);
1549 hci_dev_unlock(hdev
);
1553 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1555 struct mgmt_mode
*cp
= data
;
1556 struct mgmt_pending_cmd
*cmd
;
1560 BT_DBG("request for %s", hdev
->name
);
1562 status
= mgmt_bredr_support(hdev
);
1564 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1566 if (!lmp_ssp_capable(hdev
))
1567 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1568 MGMT_STATUS_NOT_SUPPORTED
);
1570 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1571 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1572 MGMT_STATUS_INVALID_PARAMS
);
1576 if (!hdev_is_powered(hdev
)) {
1580 changed
= !hci_dev_test_and_set_flag(hdev
,
1583 changed
= hci_dev_test_and_clear_flag(hdev
,
1586 changed
= hci_dev_test_and_clear_flag(hdev
,
1589 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
1592 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1597 err
= new_settings(hdev
, sk
);
1602 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1603 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1608 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
1609 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1613 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1619 if (!cp
->val
&& hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
1620 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
1621 sizeof(cp
->val
), &cp
->val
);
1623 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1625 mgmt_pending_remove(cmd
);
1630 hci_dev_unlock(hdev
);
1634 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1636 struct mgmt_mode
*cp
= data
;
1641 BT_DBG("request for %s", hdev
->name
);
1643 status
= mgmt_bredr_support(hdev
);
1645 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1647 if (!lmp_ssp_capable(hdev
))
1648 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1649 MGMT_STATUS_NOT_SUPPORTED
);
1651 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
1652 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1653 MGMT_STATUS_REJECTED
);
1655 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1656 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1657 MGMT_STATUS_INVALID_PARAMS
);
1661 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1662 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1668 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
1670 if (hdev_is_powered(hdev
)) {
1671 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1672 MGMT_STATUS_REJECTED
);
1676 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
1679 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1684 err
= new_settings(hdev
, sk
);
1687 hci_dev_unlock(hdev
);
1691 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1693 struct cmd_lookup match
= { NULL
, hdev
};
1698 u8 mgmt_err
= mgmt_status(status
);
1700 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1705 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1707 new_settings(hdev
, match
.sk
);
1712 /* Make sure the controller has a good default for
1713 * advertising data. Restrict the update to when LE
1714 * has actually been enabled. During power on, the
1715 * update in powered_update_hci will take care of it.
1717 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1718 struct hci_request req
;
1720 hci_req_init(&req
, hdev
);
1721 __hci_req_update_adv_data(&req
, 0x00);
1722 __hci_req_update_scan_rsp_data(&req
, 0x00);
1723 hci_req_run(&req
, NULL
);
1724 hci_update_background_scan(hdev
);
1728 hci_dev_unlock(hdev
);
1731 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1733 struct mgmt_mode
*cp
= data
;
1734 struct hci_cp_write_le_host_supported hci_cp
;
1735 struct mgmt_pending_cmd
*cmd
;
1736 struct hci_request req
;
1740 BT_DBG("request for %s", hdev
->name
);
1742 if (!lmp_le_capable(hdev
))
1743 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1744 MGMT_STATUS_NOT_SUPPORTED
);
1746 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1747 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1748 MGMT_STATUS_INVALID_PARAMS
);
1750 /* Bluetooth single mode LE only controllers or dual-mode
1751 * controllers configured as LE only devices, do not allow
1752 * switching LE off. These have either LE enabled explicitly
1753 * or BR/EDR has been previously switched off.
1755 * When trying to enable an already enabled LE, then gracefully
1756 * send a positive response. Trying to disable it however will
1757 * result into rejection.
1759 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1760 if (cp
->val
== 0x01)
1761 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1763 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1764 MGMT_STATUS_REJECTED
);
1770 enabled
= lmp_host_le_capable(hdev
);
1773 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, true);
1775 if (!hdev_is_powered(hdev
) || val
== enabled
) {
1776 bool changed
= false;
1778 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1779 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
1783 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
1784 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
1788 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1793 err
= new_settings(hdev
, sk
);
1798 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
1799 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
1800 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1805 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
1811 hci_req_init(&req
, hdev
);
1813 memset(&hci_cp
, 0, sizeof(hci_cp
));
1817 hci_cp
.simul
= 0x00;
1819 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1820 __hci_req_disable_advertising(&req
);
1823 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
1826 err
= hci_req_run(&req
, le_enable_complete
);
1828 mgmt_pending_remove(cmd
);
1831 hci_dev_unlock(hdev
);
1835 /* This is a helper function to test for pending mgmt commands that can
1836 * cause CoD or EIR HCI commands. We can only allow one such pending
1837 * mgmt command at a time since otherwise we cannot easily track what
1838 * the current values are, will be, and based on that calculate if a new
1839 * HCI command needs to be sent and if yes with what value.
1841 static bool pending_eir_or_class(struct hci_dev
*hdev
)
1843 struct mgmt_pending_cmd
*cmd
;
1845 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
1846 switch (cmd
->opcode
) {
1847 case MGMT_OP_ADD_UUID
:
1848 case MGMT_OP_REMOVE_UUID
:
1849 case MGMT_OP_SET_DEV_CLASS
:
1850 case MGMT_OP_SET_POWERED
:
1858 static const u8 bluetooth_base_uuid
[] = {
1859 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1860 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1863 static u8
get_uuid_size(const u8
*uuid
)
1867 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
1870 val
= get_unaligned_le32(&uuid
[12]);
1877 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
1879 struct mgmt_pending_cmd
*cmd
;
1883 cmd
= pending_find(mgmt_op
, hdev
);
1887 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
1888 mgmt_status(status
), hdev
->dev_class
, 3);
1890 mgmt_pending_remove(cmd
);
1893 hci_dev_unlock(hdev
);
1896 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1898 BT_DBG("status 0x%02x", status
);
1900 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
1903 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1905 struct mgmt_cp_add_uuid
*cp
= data
;
1906 struct mgmt_pending_cmd
*cmd
;
1907 struct hci_request req
;
1908 struct bt_uuid
*uuid
;
1911 BT_DBG("request for %s", hdev
->name
);
1915 if (pending_eir_or_class(hdev
)) {
1916 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
1921 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
1927 memcpy(uuid
->uuid
, cp
->uuid
, 16);
1928 uuid
->svc_hint
= cp
->svc_hint
;
1929 uuid
->size
= get_uuid_size(cp
->uuid
);
1931 list_add_tail(&uuid
->list
, &hdev
->uuids
);
1933 hci_req_init(&req
, hdev
);
1935 __hci_req_update_class(&req
);
1936 __hci_req_update_eir(&req
);
1938 err
= hci_req_run(&req
, add_uuid_complete
);
1940 if (err
!= -ENODATA
)
1943 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
1944 hdev
->dev_class
, 3);
1948 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
1957 hci_dev_unlock(hdev
);
1961 static bool enable_service_cache(struct hci_dev
*hdev
)
1963 if (!hdev_is_powered(hdev
))
1966 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
1967 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
1975 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1977 BT_DBG("status 0x%02x", status
);
1979 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
1982 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1985 struct mgmt_cp_remove_uuid
*cp
= data
;
1986 struct mgmt_pending_cmd
*cmd
;
1987 struct bt_uuid
*match
, *tmp
;
1988 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1989 struct hci_request req
;
1992 BT_DBG("request for %s", hdev
->name
);
1996 if (pending_eir_or_class(hdev
)) {
1997 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2002 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2003 hci_uuids_clear(hdev
);
2005 if (enable_service_cache(hdev
)) {
2006 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2007 MGMT_OP_REMOVE_UUID
,
2008 0, hdev
->dev_class
, 3);
2017 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2018 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2021 list_del(&match
->list
);
2027 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2028 MGMT_STATUS_INVALID_PARAMS
);
2033 hci_req_init(&req
, hdev
);
2035 __hci_req_update_class(&req
);
2036 __hci_req_update_eir(&req
);
2038 err
= hci_req_run(&req
, remove_uuid_complete
);
2040 if (err
!= -ENODATA
)
2043 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2044 hdev
->dev_class
, 3);
2048 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2057 hci_dev_unlock(hdev
);
2061 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2063 BT_DBG("status 0x%02x", status
);
2065 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2068 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2071 struct mgmt_cp_set_dev_class
*cp
= data
;
2072 struct mgmt_pending_cmd
*cmd
;
2073 struct hci_request req
;
2076 BT_DBG("request for %s", hdev
->name
);
2078 if (!lmp_bredr_capable(hdev
))
2079 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2080 MGMT_STATUS_NOT_SUPPORTED
);
2084 if (pending_eir_or_class(hdev
)) {
2085 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2090 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2091 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2092 MGMT_STATUS_INVALID_PARAMS
);
2096 hdev
->major_class
= cp
->major
;
2097 hdev
->minor_class
= cp
->minor
;
2099 if (!hdev_is_powered(hdev
)) {
2100 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2101 hdev
->dev_class
, 3);
2105 hci_req_init(&req
, hdev
);
2107 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2108 hci_dev_unlock(hdev
);
2109 cancel_delayed_work_sync(&hdev
->service_cache
);
2111 __hci_req_update_eir(&req
);
2114 __hci_req_update_class(&req
);
2116 err
= hci_req_run(&req
, set_class_complete
);
2118 if (err
!= -ENODATA
)
2121 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2122 hdev
->dev_class
, 3);
2126 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2135 hci_dev_unlock(hdev
);
2139 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2142 struct mgmt_cp_load_link_keys
*cp
= data
;
2143 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2144 sizeof(struct mgmt_link_key_info
));
2145 u16 key_count
, expected_len
;
2149 BT_DBG("request for %s", hdev
->name
);
2151 if (!lmp_bredr_capable(hdev
))
2152 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2153 MGMT_STATUS_NOT_SUPPORTED
);
2155 key_count
= __le16_to_cpu(cp
->key_count
);
2156 if (key_count
> max_key_count
) {
2157 BT_ERR("load_link_keys: too big key_count value %u",
2159 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2160 MGMT_STATUS_INVALID_PARAMS
);
2163 expected_len
= sizeof(*cp
) + key_count
*
2164 sizeof(struct mgmt_link_key_info
);
2165 if (expected_len
!= len
) {
2166 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2168 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2169 MGMT_STATUS_INVALID_PARAMS
);
2172 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2173 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2174 MGMT_STATUS_INVALID_PARAMS
);
2176 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2179 for (i
= 0; i
< key_count
; i
++) {
2180 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2182 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2183 return mgmt_cmd_status(sk
, hdev
->id
,
2184 MGMT_OP_LOAD_LINK_KEYS
,
2185 MGMT_STATUS_INVALID_PARAMS
);
2190 hci_link_keys_clear(hdev
);
2193 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2195 changed
= hci_dev_test_and_clear_flag(hdev
,
2196 HCI_KEEP_DEBUG_KEYS
);
2199 new_settings(hdev
, NULL
);
2201 for (i
= 0; i
< key_count
; i
++) {
2202 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2204 /* Always ignore debug keys and require a new pairing if
2205 * the user wants to use them.
2207 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2210 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2211 key
->type
, key
->pin_len
, NULL
);
2214 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2216 hci_dev_unlock(hdev
);
2221 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2222 u8 addr_type
, struct sock
*skip_sk
)
2224 struct mgmt_ev_device_unpaired ev
;
2226 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2227 ev
.addr
.type
= addr_type
;
2229 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2233 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2236 struct mgmt_cp_unpair_device
*cp
= data
;
2237 struct mgmt_rp_unpair_device rp
;
2238 struct hci_conn_params
*params
;
2239 struct mgmt_pending_cmd
*cmd
;
2240 struct hci_conn
*conn
;
2244 memset(&rp
, 0, sizeof(rp
));
2245 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2246 rp
.addr
.type
= cp
->addr
.type
;
2248 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2249 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2250 MGMT_STATUS_INVALID_PARAMS
,
2253 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2254 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2255 MGMT_STATUS_INVALID_PARAMS
,
2260 if (!hdev_is_powered(hdev
)) {
2261 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2262 MGMT_STATUS_NOT_POWERED
, &rp
,
2267 if (cp
->addr
.type
== BDADDR_BREDR
) {
2268 /* If disconnection is requested, then look up the
2269 * connection. If the remote device is connected, it
2270 * will be later used to terminate the link.
2272 * Setting it to NULL explicitly will cause no
2273 * termination of the link.
2276 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2281 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2283 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2284 MGMT_OP_UNPAIR_DEVICE
,
2285 MGMT_STATUS_NOT_PAIRED
, &rp
,
2293 /* LE address type */
2294 addr_type
= le_addr_type(cp
->addr
.type
);
2296 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2298 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2300 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2301 MGMT_STATUS_NOT_PAIRED
, &rp
,
2306 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2308 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2312 /* Abort any ongoing SMP pairing */
2313 smp_cancel_pairing(conn
);
2315 /* Defer clearing up the connection parameters until closing to
2316 * give a chance of keeping them if a repairing happens.
2318 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2320 /* Disable auto-connection parameters if present */
2321 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2323 if (params
->explicit_connect
)
2324 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
2326 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2329 /* If disconnection is not requested, then clear the connection
2330 * variable so that the link is not terminated.
2332 if (!cp
->disconnect
)
2336 /* If the connection variable is set, then termination of the
2337 * link is requested.
2340 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2342 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2346 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2353 cmd
->cmd_complete
= addr_cmd_complete
;
2355 err
= hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2357 mgmt_pending_remove(cmd
);
2360 hci_dev_unlock(hdev
);
2364 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2367 struct mgmt_cp_disconnect
*cp
= data
;
2368 struct mgmt_rp_disconnect rp
;
2369 struct mgmt_pending_cmd
*cmd
;
2370 struct hci_conn
*conn
;
2375 memset(&rp
, 0, sizeof(rp
));
2376 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2377 rp
.addr
.type
= cp
->addr
.type
;
2379 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2380 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2381 MGMT_STATUS_INVALID_PARAMS
,
2386 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2387 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2388 MGMT_STATUS_NOT_POWERED
, &rp
,
2393 if (pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2394 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2395 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2399 if (cp
->addr
.type
== BDADDR_BREDR
)
2400 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2403 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
2404 le_addr_type(cp
->addr
.type
));
2406 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2407 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2408 MGMT_STATUS_NOT_CONNECTED
, &rp
,
2413 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2419 cmd
->cmd_complete
= generic_cmd_complete
;
2421 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2423 mgmt_pending_remove(cmd
);
2426 hci_dev_unlock(hdev
);
2430 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2432 switch (link_type
) {
2434 switch (addr_type
) {
2435 case ADDR_LE_DEV_PUBLIC
:
2436 return BDADDR_LE_PUBLIC
;
2439 /* Fallback to LE Random address type */
2440 return BDADDR_LE_RANDOM
;
2444 /* Fallback to BR/EDR type */
2445 return BDADDR_BREDR
;
2449 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2452 struct mgmt_rp_get_connections
*rp
;
2462 if (!hdev_is_powered(hdev
)) {
2463 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2464 MGMT_STATUS_NOT_POWERED
);
2469 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2470 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2474 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2475 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2482 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2483 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2485 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2486 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2487 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2492 rp
->conn_count
= cpu_to_le16(i
);
2494 /* Recalculate length in case of filtered SCO connections, etc */
2495 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2497 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2503 hci_dev_unlock(hdev
);
2507 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2508 struct mgmt_cp_pin_code_neg_reply
*cp
)
2510 struct mgmt_pending_cmd
*cmd
;
2513 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2518 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2519 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2521 mgmt_pending_remove(cmd
);
2526 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2529 struct hci_conn
*conn
;
2530 struct mgmt_cp_pin_code_reply
*cp
= data
;
2531 struct hci_cp_pin_code_reply reply
;
2532 struct mgmt_pending_cmd
*cmd
;
2539 if (!hdev_is_powered(hdev
)) {
2540 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2541 MGMT_STATUS_NOT_POWERED
);
2545 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2547 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2548 MGMT_STATUS_NOT_CONNECTED
);
2552 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2553 struct mgmt_cp_pin_code_neg_reply ncp
;
2555 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2557 BT_ERR("PIN code is not 16 bytes long");
2559 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2561 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2562 MGMT_STATUS_INVALID_PARAMS
);
2567 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2573 cmd
->cmd_complete
= addr_cmd_complete
;
2575 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2576 reply
.pin_len
= cp
->pin_len
;
2577 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2579 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2581 mgmt_pending_remove(cmd
);
2584 hci_dev_unlock(hdev
);
2588 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2591 struct mgmt_cp_set_io_capability
*cp
= data
;
2595 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2596 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2597 MGMT_STATUS_INVALID_PARAMS
);
2601 hdev
->io_capability
= cp
->io_capability
;
2603 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2604 hdev
->io_capability
);
2606 hci_dev_unlock(hdev
);
2608 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
2612 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
2614 struct hci_dev
*hdev
= conn
->hdev
;
2615 struct mgmt_pending_cmd
*cmd
;
2617 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2618 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2621 if (cmd
->user_data
!= conn
)
2630 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
2632 struct mgmt_rp_pair_device rp
;
2633 struct hci_conn
*conn
= cmd
->user_data
;
2636 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2637 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2639 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
2640 status
, &rp
, sizeof(rp
));
2642 /* So we don't get further callbacks for this connection */
2643 conn
->connect_cfm_cb
= NULL
;
2644 conn
->security_cfm_cb
= NULL
;
2645 conn
->disconn_cfm_cb
= NULL
;
2647 hci_conn_drop(conn
);
2649 /* The device is paired so there is no need to remove
2650 * its connection parameters anymore.
2652 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2659 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2661 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2662 struct mgmt_pending_cmd
*cmd
;
2664 cmd
= find_pairing(conn
);
2666 cmd
->cmd_complete(cmd
, status
);
2667 mgmt_pending_remove(cmd
);
2671 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2673 struct mgmt_pending_cmd
*cmd
;
2675 BT_DBG("status %u", status
);
2677 cmd
= find_pairing(conn
);
2679 BT_DBG("Unable to find a pending command");
2683 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2684 mgmt_pending_remove(cmd
);
2687 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2689 struct mgmt_pending_cmd
*cmd
;
2691 BT_DBG("status %u", status
);
2696 cmd
= find_pairing(conn
);
2698 BT_DBG("Unable to find a pending command");
2702 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2703 mgmt_pending_remove(cmd
);
2706 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2709 struct mgmt_cp_pair_device
*cp
= data
;
2710 struct mgmt_rp_pair_device rp
;
2711 struct mgmt_pending_cmd
*cmd
;
2712 u8 sec_level
, auth_type
;
2713 struct hci_conn
*conn
;
2718 memset(&rp
, 0, sizeof(rp
));
2719 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2720 rp
.addr
.type
= cp
->addr
.type
;
2722 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2723 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2724 MGMT_STATUS_INVALID_PARAMS
,
2727 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
2728 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2729 MGMT_STATUS_INVALID_PARAMS
,
2734 if (!hdev_is_powered(hdev
)) {
2735 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2736 MGMT_STATUS_NOT_POWERED
, &rp
,
2741 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
2742 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2743 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
2748 sec_level
= BT_SECURITY_MEDIUM
;
2749 auth_type
= HCI_AT_DEDICATED_BONDING
;
2751 if (cp
->addr
.type
== BDADDR_BREDR
) {
2752 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2755 u8 addr_type
= le_addr_type(cp
->addr
.type
);
2756 struct hci_conn_params
*p
;
2758 /* When pairing a new device, it is expected to remember
2759 * this device for future connections. Adding the connection
2760 * parameter information ahead of time allows tracking
2761 * of the slave preferred values and will speed up any
2762 * further connection establishment.
2764 * If connection parameters already exist, then they
2765 * will be kept and this function does nothing.
2767 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2769 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
2770 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2772 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
,
2773 addr_type
, sec_level
,
2774 HCI_LE_CONN_TIMEOUT
);
2780 if (PTR_ERR(conn
) == -EBUSY
)
2781 status
= MGMT_STATUS_BUSY
;
2782 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
2783 status
= MGMT_STATUS_NOT_SUPPORTED
;
2784 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
2785 status
= MGMT_STATUS_REJECTED
;
2787 status
= MGMT_STATUS_CONNECT_FAILED
;
2789 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2790 status
, &rp
, sizeof(rp
));
2794 if (conn
->connect_cfm_cb
) {
2795 hci_conn_drop(conn
);
2796 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2797 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2801 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2804 hci_conn_drop(conn
);
2808 cmd
->cmd_complete
= pairing_complete
;
2810 /* For LE, just connecting isn't a proof that the pairing finished */
2811 if (cp
->addr
.type
== BDADDR_BREDR
) {
2812 conn
->connect_cfm_cb
= pairing_complete_cb
;
2813 conn
->security_cfm_cb
= pairing_complete_cb
;
2814 conn
->disconn_cfm_cb
= pairing_complete_cb
;
2816 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
2817 conn
->security_cfm_cb
= le_pairing_complete_cb
;
2818 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
2821 conn
->io_capability
= cp
->io_cap
;
2822 cmd
->user_data
= hci_conn_get(conn
);
2824 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
2825 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
2826 cmd
->cmd_complete(cmd
, 0);
2827 mgmt_pending_remove(cmd
);
2833 hci_dev_unlock(hdev
);
2837 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2840 struct mgmt_addr_info
*addr
= data
;
2841 struct mgmt_pending_cmd
*cmd
;
2842 struct hci_conn
*conn
;
2849 if (!hdev_is_powered(hdev
)) {
2850 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2851 MGMT_STATUS_NOT_POWERED
);
2855 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
2857 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2858 MGMT_STATUS_INVALID_PARAMS
);
2862 conn
= cmd
->user_data
;
2864 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
2865 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2866 MGMT_STATUS_INVALID_PARAMS
);
2870 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
2871 mgmt_pending_remove(cmd
);
2873 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
2874 addr
, sizeof(*addr
));
2876 hci_dev_unlock(hdev
);
2880 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
2881 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
2882 u16 hci_op
, __le32 passkey
)
2884 struct mgmt_pending_cmd
*cmd
;
2885 struct hci_conn
*conn
;
2890 if (!hdev_is_powered(hdev
)) {
2891 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2892 MGMT_STATUS_NOT_POWERED
, addr
,
2897 if (addr
->type
== BDADDR_BREDR
)
2898 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
2900 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
2901 le_addr_type(addr
->type
));
2904 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2905 MGMT_STATUS_NOT_CONNECTED
, addr
,
2910 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
2911 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
2913 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2914 MGMT_STATUS_SUCCESS
, addr
,
2917 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2918 MGMT_STATUS_FAILED
, addr
,
2924 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
2930 cmd
->cmd_complete
= addr_cmd_complete
;
2932 /* Continue with pairing via HCI */
2933 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
2934 struct hci_cp_user_passkey_reply cp
;
2936 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
2937 cp
.passkey
= passkey
;
2938 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
2940 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
2944 mgmt_pending_remove(cmd
);
2947 hci_dev_unlock(hdev
);
2951 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2952 void *data
, u16 len
)
2954 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
2958 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2959 MGMT_OP_PIN_CODE_NEG_REPLY
,
2960 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
2963 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2966 struct mgmt_cp_user_confirm_reply
*cp
= data
;
2970 if (len
!= sizeof(*cp
))
2971 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
2972 MGMT_STATUS_INVALID_PARAMS
);
2974 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2975 MGMT_OP_USER_CONFIRM_REPLY
,
2976 HCI_OP_USER_CONFIRM_REPLY
, 0);
2979 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2980 void *data
, u16 len
)
2982 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
2986 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2987 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
2988 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
2991 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2994 struct mgmt_cp_user_passkey_reply
*cp
= data
;
2998 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2999 MGMT_OP_USER_PASSKEY_REPLY
,
3000 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3003 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3004 void *data
, u16 len
)
3006 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3010 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3011 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3012 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3015 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3017 struct mgmt_cp_set_local_name
*cp
;
3018 struct mgmt_pending_cmd
*cmd
;
3020 BT_DBG("status 0x%02x", status
);
3024 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3031 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3032 mgmt_status(status
));
3034 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3037 mgmt_pending_remove(cmd
);
3040 hci_dev_unlock(hdev
);
3043 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3046 struct mgmt_cp_set_local_name
*cp
= data
;
3047 struct mgmt_pending_cmd
*cmd
;
3048 struct hci_request req
;
3055 /* If the old values are the same as the new ones just return a
3056 * direct command complete event.
3058 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3059 !memcmp(hdev
->short_name
, cp
->short_name
,
3060 sizeof(hdev
->short_name
))) {
3061 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3066 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3068 if (!hdev_is_powered(hdev
)) {
3069 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3071 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3076 err
= mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
,
3077 len
, HCI_MGMT_LOCAL_NAME_EVENTS
, sk
);
3078 ext_info_changed(hdev
, sk
);
3083 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3089 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3091 hci_req_init(&req
, hdev
);
3093 if (lmp_bredr_capable(hdev
)) {
3094 __hci_req_update_name(&req
);
3095 __hci_req_update_eir(&req
);
3098 /* The name is stored in the scan response data and so
3099 * no need to udpate the advertising data here.
3101 if (lmp_le_capable(hdev
))
3102 __hci_req_update_scan_rsp_data(&req
, hdev
->cur_adv_instance
);
3104 err
= hci_req_run(&req
, set_name_complete
);
3106 mgmt_pending_remove(cmd
);
3109 hci_dev_unlock(hdev
);
3113 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
3114 u16 opcode
, struct sk_buff
*skb
)
3116 struct mgmt_rp_read_local_oob_data mgmt_rp
;
3117 size_t rp_size
= sizeof(mgmt_rp
);
3118 struct mgmt_pending_cmd
*cmd
;
3120 BT_DBG("%s status %u", hdev
->name
, status
);
3122 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
3126 if (status
|| !skb
) {
3127 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3128 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
3132 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
3134 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
3135 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
3137 if (skb
->len
< sizeof(*rp
)) {
3138 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3139 MGMT_OP_READ_LOCAL_OOB_DATA
,
3140 MGMT_STATUS_FAILED
);
3144 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
3145 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
3147 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
3149 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
3151 if (skb
->len
< sizeof(*rp
)) {
3152 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3153 MGMT_OP_READ_LOCAL_OOB_DATA
,
3154 MGMT_STATUS_FAILED
);
3158 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
3159 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
3161 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
3162 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
3165 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3166 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
3169 mgmt_pending_remove(cmd
);
3172 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3173 void *data
, u16 data_len
)
3175 struct mgmt_pending_cmd
*cmd
;
3176 struct hci_request req
;
3179 BT_DBG("%s", hdev
->name
);
3183 if (!hdev_is_powered(hdev
)) {
3184 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3185 MGMT_STATUS_NOT_POWERED
);
3189 if (!lmp_ssp_capable(hdev
)) {
3190 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3191 MGMT_STATUS_NOT_SUPPORTED
);
3195 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3196 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3201 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3207 hci_req_init(&req
, hdev
);
3209 if (bredr_sc_enabled(hdev
))
3210 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
3212 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3214 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
3216 mgmt_pending_remove(cmd
);
3219 hci_dev_unlock(hdev
);
3223 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3224 void *data
, u16 len
)
3226 struct mgmt_addr_info
*addr
= data
;
3229 BT_DBG("%s ", hdev
->name
);
3231 if (!bdaddr_type_is_valid(addr
->type
))
3232 return mgmt_cmd_complete(sk
, hdev
->id
,
3233 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3234 MGMT_STATUS_INVALID_PARAMS
,
3235 addr
, sizeof(*addr
));
3239 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3240 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3243 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3244 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3245 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3246 MGMT_STATUS_INVALID_PARAMS
,
3247 &cp
->addr
, sizeof(cp
->addr
));
3251 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3252 cp
->addr
.type
, cp
->hash
,
3253 cp
->rand
, NULL
, NULL
);
3255 status
= MGMT_STATUS_FAILED
;
3257 status
= MGMT_STATUS_SUCCESS
;
3259 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3260 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
3261 &cp
->addr
, sizeof(cp
->addr
));
3262 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3263 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3264 u8
*rand192
, *hash192
, *rand256
, *hash256
;
3267 if (bdaddr_type_is_le(cp
->addr
.type
)) {
3268 /* Enforce zero-valued 192-bit parameters as
3269 * long as legacy SMP OOB isn't implemented.
3271 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
3272 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
3273 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3274 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3275 MGMT_STATUS_INVALID_PARAMS
,
3276 addr
, sizeof(*addr
));
3283 /* In case one of the P-192 values is set to zero,
3284 * then just disable OOB data for P-192.
3286 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
3287 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
3291 rand192
= cp
->rand192
;
3292 hash192
= cp
->hash192
;
3296 /* In case one of the P-256 values is set to zero, then just
3297 * disable OOB data for P-256.
3299 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
3300 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
3304 rand256
= cp
->rand256
;
3305 hash256
= cp
->hash256
;
3308 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3309 cp
->addr
.type
, hash192
, rand192
,
3312 status
= MGMT_STATUS_FAILED
;
3314 status
= MGMT_STATUS_SUCCESS
;
3316 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3317 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3318 status
, &cp
->addr
, sizeof(cp
->addr
));
3320 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3321 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3322 MGMT_STATUS_INVALID_PARAMS
);
3326 hci_dev_unlock(hdev
);
3330 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3331 void *data
, u16 len
)
3333 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3337 BT_DBG("%s", hdev
->name
);
3339 if (cp
->addr
.type
!= BDADDR_BREDR
)
3340 return mgmt_cmd_complete(sk
, hdev
->id
,
3341 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3342 MGMT_STATUS_INVALID_PARAMS
,
3343 &cp
->addr
, sizeof(cp
->addr
));
3347 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
3348 hci_remote_oob_data_clear(hdev
);
3349 status
= MGMT_STATUS_SUCCESS
;
3353 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3355 status
= MGMT_STATUS_INVALID_PARAMS
;
3357 status
= MGMT_STATUS_SUCCESS
;
3360 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3361 status
, &cp
->addr
, sizeof(cp
->addr
));
3363 hci_dev_unlock(hdev
);
3367 void mgmt_start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3369 struct mgmt_pending_cmd
*cmd
;
3371 BT_DBG("status %d", status
);
3375 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3377 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
3380 cmd
= pending_find(MGMT_OP_START_LIMITED_DISCOVERY
, hdev
);
3383 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3384 mgmt_pending_remove(cmd
);
3387 hci_dev_unlock(hdev
);
3390 static bool discovery_type_is_valid(struct hci_dev
*hdev
, uint8_t type
,
3391 uint8_t *mgmt_status
)
3394 case DISCOV_TYPE_LE
:
3395 *mgmt_status
= mgmt_le_support(hdev
);
3399 case DISCOV_TYPE_INTERLEAVED
:
3400 *mgmt_status
= mgmt_le_support(hdev
);
3403 /* Intentional fall-through */
3404 case DISCOV_TYPE_BREDR
:
3405 *mgmt_status
= mgmt_bredr_support(hdev
);
3410 *mgmt_status
= MGMT_STATUS_INVALID_PARAMS
;
3417 static int start_discovery_internal(struct sock
*sk
, struct hci_dev
*hdev
,
3418 u16 op
, void *data
, u16 len
)
3420 struct mgmt_cp_start_discovery
*cp
= data
;
3421 struct mgmt_pending_cmd
*cmd
;
3425 BT_DBG("%s", hdev
->name
);
3429 if (!hdev_is_powered(hdev
)) {
3430 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
,
3431 MGMT_STATUS_NOT_POWERED
,
3432 &cp
->type
, sizeof(cp
->type
));
3436 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3437 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
3438 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
3439 &cp
->type
, sizeof(cp
->type
));
3443 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
3444 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, status
,
3445 &cp
->type
, sizeof(cp
->type
));
3449 /* Clear the discovery filter first to free any previously
3450 * allocated memory for the UUID list.
3452 hci_discovery_filter_clear(hdev
);
3454 hdev
->discovery
.type
= cp
->type
;
3455 hdev
->discovery
.report_invalid_rssi
= false;
3456 if (op
== MGMT_OP_START_LIMITED_DISCOVERY
)
3457 hdev
->discovery
.limited
= true;
3459 hdev
->discovery
.limited
= false;
3461 cmd
= mgmt_pending_add(sk
, op
, hdev
, data
, len
);
3467 cmd
->cmd_complete
= generic_cmd_complete
;
3469 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3470 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3474 hci_dev_unlock(hdev
);
3478 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3479 void *data
, u16 len
)
3481 return start_discovery_internal(sk
, hdev
, MGMT_OP_START_DISCOVERY
,
3485 static int start_limited_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3486 void *data
, u16 len
)
3488 return start_discovery_internal(sk
, hdev
,
3489 MGMT_OP_START_LIMITED_DISCOVERY
,
3493 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
3496 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
3500 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3501 void *data
, u16 len
)
3503 struct mgmt_cp_start_service_discovery
*cp
= data
;
3504 struct mgmt_pending_cmd
*cmd
;
3505 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
3506 u16 uuid_count
, expected_len
;
3510 BT_DBG("%s", hdev
->name
);
3514 if (!hdev_is_powered(hdev
)) {
3515 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3516 MGMT_OP_START_SERVICE_DISCOVERY
,
3517 MGMT_STATUS_NOT_POWERED
,
3518 &cp
->type
, sizeof(cp
->type
));
3522 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3523 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
3524 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3525 MGMT_OP_START_SERVICE_DISCOVERY
,
3526 MGMT_STATUS_BUSY
, &cp
->type
,
3531 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
3532 if (uuid_count
> max_uuid_count
) {
3533 BT_ERR("service_discovery: too big uuid_count value %u",
3535 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3536 MGMT_OP_START_SERVICE_DISCOVERY
,
3537 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3542 expected_len
= sizeof(*cp
) + uuid_count
* 16;
3543 if (expected_len
!= len
) {
3544 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3546 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3547 MGMT_OP_START_SERVICE_DISCOVERY
,
3548 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3553 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
3554 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3555 MGMT_OP_START_SERVICE_DISCOVERY
,
3556 status
, &cp
->type
, sizeof(cp
->type
));
3560 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
3567 cmd
->cmd_complete
= service_discovery_cmd_complete
;
3569 /* Clear the discovery filter first to free any previously
3570 * allocated memory for the UUID list.
3572 hci_discovery_filter_clear(hdev
);
3574 hdev
->discovery
.result_filtering
= true;
3575 hdev
->discovery
.type
= cp
->type
;
3576 hdev
->discovery
.rssi
= cp
->rssi
;
3577 hdev
->discovery
.uuid_count
= uuid_count
;
3579 if (uuid_count
> 0) {
3580 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
3582 if (!hdev
->discovery
.uuids
) {
3583 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3584 MGMT_OP_START_SERVICE_DISCOVERY
,
3586 &cp
->type
, sizeof(cp
->type
));
3587 mgmt_pending_remove(cmd
);
3592 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3593 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3597 hci_dev_unlock(hdev
);
3601 void mgmt_stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3603 struct mgmt_pending_cmd
*cmd
;
3605 BT_DBG("status %d", status
);
3609 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3611 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3612 mgmt_pending_remove(cmd
);
3615 hci_dev_unlock(hdev
);
3618 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3621 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3622 struct mgmt_pending_cmd
*cmd
;
3625 BT_DBG("%s", hdev
->name
);
3629 if (!hci_discovery_active(hdev
)) {
3630 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3631 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3632 sizeof(mgmt_cp
->type
));
3636 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3637 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3638 MGMT_STATUS_INVALID_PARAMS
,
3639 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
3643 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
3649 cmd
->cmd_complete
= generic_cmd_complete
;
3651 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3652 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3656 hci_dev_unlock(hdev
);
3660 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3663 struct mgmt_cp_confirm_name
*cp
= data
;
3664 struct inquiry_entry
*e
;
3667 BT_DBG("%s", hdev
->name
);
3671 if (!hci_discovery_active(hdev
)) {
3672 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3673 MGMT_STATUS_FAILED
, &cp
->addr
,
3678 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3680 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3681 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
3686 if (cp
->name_known
) {
3687 e
->name_state
= NAME_KNOWN
;
3690 e
->name_state
= NAME_NEEDED
;
3691 hci_inquiry_cache_update_resolve(hdev
, e
);
3694 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
3695 &cp
->addr
, sizeof(cp
->addr
));
3698 hci_dev_unlock(hdev
);
3702 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3705 struct mgmt_cp_block_device
*cp
= data
;
3709 BT_DBG("%s", hdev
->name
);
3711 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3712 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3713 MGMT_STATUS_INVALID_PARAMS
,
3714 &cp
->addr
, sizeof(cp
->addr
));
3718 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
3721 status
= MGMT_STATUS_FAILED
;
3725 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3727 status
= MGMT_STATUS_SUCCESS
;
3730 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3731 &cp
->addr
, sizeof(cp
->addr
));
3733 hci_dev_unlock(hdev
);
3738 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3741 struct mgmt_cp_unblock_device
*cp
= data
;
3745 BT_DBG("%s", hdev
->name
);
3747 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3748 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3749 MGMT_STATUS_INVALID_PARAMS
,
3750 &cp
->addr
, sizeof(cp
->addr
));
3754 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
3757 status
= MGMT_STATUS_INVALID_PARAMS
;
3761 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3763 status
= MGMT_STATUS_SUCCESS
;
3766 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3767 &cp
->addr
, sizeof(cp
->addr
));
3769 hci_dev_unlock(hdev
);
3774 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3777 struct mgmt_cp_set_device_id
*cp
= data
;
3778 struct hci_request req
;
3782 BT_DBG("%s", hdev
->name
);
3784 source
= __le16_to_cpu(cp
->source
);
3786 if (source
> 0x0002)
3787 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3788 MGMT_STATUS_INVALID_PARAMS
);
3792 hdev
->devid_source
= source
;
3793 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3794 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3795 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3797 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
3800 hci_req_init(&req
, hdev
);
3801 __hci_req_update_eir(&req
);
3802 hci_req_run(&req
, NULL
);
3804 hci_dev_unlock(hdev
);
3809 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
3812 BT_DBG("status %d", status
);
3815 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
3818 struct cmd_lookup match
= { NULL
, hdev
};
3819 struct hci_request req
;
3821 struct adv_info
*adv_instance
;
3827 u8 mgmt_err
= mgmt_status(status
);
3829 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3830 cmd_status_rsp
, &mgmt_err
);
3834 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3835 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
3837 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
3839 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3842 new_settings(hdev
, match
.sk
);
3847 /* If "Set Advertising" was just disabled and instance advertising was
3848 * set up earlier, then re-enable multi-instance advertising.
3850 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
3851 list_empty(&hdev
->adv_instances
))
3854 instance
= hdev
->cur_adv_instance
;
3856 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
3857 struct adv_info
, list
);
3861 instance
= adv_instance
->instance
;
3864 hci_req_init(&req
, hdev
);
3866 err
= __hci_req_schedule_adv_instance(&req
, instance
, true);
3869 err
= hci_req_run(&req
, enable_advertising_instance
);
3872 BT_ERR("Failed to re-configure advertising");
3875 hci_dev_unlock(hdev
);
3878 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3881 struct mgmt_mode
*cp
= data
;
3882 struct mgmt_pending_cmd
*cmd
;
3883 struct hci_request req
;
3887 BT_DBG("request for %s", hdev
->name
);
3889 status
= mgmt_le_support(hdev
);
3891 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3894 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
3895 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3896 MGMT_STATUS_INVALID_PARAMS
);
3902 /* The following conditions are ones which mean that we should
3903 * not do any HCI communication but directly send a mgmt
3904 * response to user space (after toggling the flag if
3907 if (!hdev_is_powered(hdev
) ||
3908 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
3909 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
3910 hci_conn_num(hdev
, LE_LINK
) > 0 ||
3911 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
3912 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
3916 hdev
->cur_adv_instance
= 0x00;
3917 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
3918 if (cp
->val
== 0x02)
3919 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
3921 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
3923 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
3924 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
3927 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
3932 err
= new_settings(hdev
, sk
);
3937 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
3938 pending_find(MGMT_OP_SET_LE
, hdev
)) {
3939 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3944 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
3950 hci_req_init(&req
, hdev
);
3952 if (cp
->val
== 0x02)
3953 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
3955 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
3957 cancel_adv_timeout(hdev
);
3960 /* Switch to instance "0" for the Set Advertising setting.
3961 * We cannot use update_[adv|scan_rsp]_data() here as the
3962 * HCI_ADVERTISING flag is not yet set.
3964 hdev
->cur_adv_instance
= 0x00;
3965 __hci_req_update_adv_data(&req
, 0x00);
3966 __hci_req_update_scan_rsp_data(&req
, 0x00);
3967 __hci_req_enable_advertising(&req
);
3969 __hci_req_disable_advertising(&req
);
3972 err
= hci_req_run(&req
, set_advertising_complete
);
3974 mgmt_pending_remove(cmd
);
3977 hci_dev_unlock(hdev
);
3981 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
3982 void *data
, u16 len
)
3984 struct mgmt_cp_set_static_address
*cp
= data
;
3987 BT_DBG("%s", hdev
->name
);
3989 if (!lmp_le_capable(hdev
))
3990 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3991 MGMT_STATUS_NOT_SUPPORTED
);
3993 if (hdev_is_powered(hdev
))
3994 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3995 MGMT_STATUS_REJECTED
);
3997 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
3998 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
3999 return mgmt_cmd_status(sk
, hdev
->id
,
4000 MGMT_OP_SET_STATIC_ADDRESS
,
4001 MGMT_STATUS_INVALID_PARAMS
);
4003 /* Two most significant bits shall be set */
4004 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4005 return mgmt_cmd_status(sk
, hdev
->id
,
4006 MGMT_OP_SET_STATIC_ADDRESS
,
4007 MGMT_STATUS_INVALID_PARAMS
);
4012 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4014 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
4018 err
= new_settings(hdev
, sk
);
4021 hci_dev_unlock(hdev
);
4025 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4026 void *data
, u16 len
)
4028 struct mgmt_cp_set_scan_params
*cp
= data
;
4029 __u16 interval
, window
;
4032 BT_DBG("%s", hdev
->name
);
4034 if (!lmp_le_capable(hdev
))
4035 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4036 MGMT_STATUS_NOT_SUPPORTED
);
4038 interval
= __le16_to_cpu(cp
->interval
);
4040 if (interval
< 0x0004 || interval
> 0x4000)
4041 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4042 MGMT_STATUS_INVALID_PARAMS
);
4044 window
= __le16_to_cpu(cp
->window
);
4046 if (window
< 0x0004 || window
> 0x4000)
4047 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4048 MGMT_STATUS_INVALID_PARAMS
);
4050 if (window
> interval
)
4051 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4052 MGMT_STATUS_INVALID_PARAMS
);
4056 hdev
->le_scan_interval
= interval
;
4057 hdev
->le_scan_window
= window
;
4059 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
4062 /* If background scan is running, restart it so new parameters are
4065 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4066 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4067 struct hci_request req
;
4069 hci_req_init(&req
, hdev
);
4071 hci_req_add_le_scan_disable(&req
);
4072 hci_req_add_le_passive_scan(&req
);
4074 hci_req_run(&req
, NULL
);
4077 hci_dev_unlock(hdev
);
4082 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
,
4085 struct mgmt_pending_cmd
*cmd
;
4087 BT_DBG("status 0x%02x", status
);
4091 cmd
= pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4096 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4097 mgmt_status(status
));
4099 struct mgmt_mode
*cp
= cmd
->param
;
4102 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
4104 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4106 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4107 new_settings(hdev
, cmd
->sk
);
4110 mgmt_pending_remove(cmd
);
4113 hci_dev_unlock(hdev
);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 * Validates the request, handles the powered-off / no-change cases
 * synchronously, otherwise queues an HCI request whose completion is
 * handled by fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Fast connectable (page scan tuning) is a BR/EDR 1.2+ feature. */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight per controller. */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just confirm current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off, only the flag is toggled; the controller is
	 * programmed later during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* HCI request completion callback for Set BR/EDR.
 * On failure the HCI_BREDR_ENABLED flag (set optimistically in
 * set_bredr()) is rolled back; on success the new settings are reported.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_BREDR handler for dual-mode (BR/EDR + LE) controllers.
 * Disabling while powered is rejected; enabling is rejected when a
 * static address or secure connections makes the configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on controllers that support both transports. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; turning BR/EDR on/off with LE off is
	 * rejected.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: confirm current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion callback for Set Secure Connections.
 * Translates the requested mode (0x00 off, 0x01 on, 0x02 SC-only) into
 * the HCI_SC_ENABLED / HCI_SC_ONLY flag pair.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_SECURE_CONN handler.
 * val: 0x00 disable, 0x01 enable, 0x02 enable SC-only mode.
 * When the controller side is not usable (powered off, not SC capable,
 * or BR/EDR disabled) only the host flags are toggled; otherwise an
 * HCI Write Secure Connections Support command is issued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC needs either controller support or LE (host-side SC). */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC requires SSP to be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Host-flag-only path: no HCI traffic needed/possible. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No effective change: confirm current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_DEBUG_KEYS handler.
 * val: 0x00 discard debug keys, 0x01 keep them, 0x02 keep them and also
 * use SSP debug mode for new pairings.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Track whether the "keep debug keys" policy actually changed. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* 0x02 additionally enables generating debug keys ourselves. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode to the controller when it is up and
	 * SSP is in use.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_PRIVACY handler.
 * privacy: 0x00 off, 0x01 on, 0x02 limited privacy. Installs the IRK
 * supplied by user space. Only allowed while the controller is
 * powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the privacy setting while powered is rejected. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Validate the address portion of an IRK entry from user space:
 * public LE addresses are always valid; random ones must be static
 * (top two address bits set).
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
/* MGMT_OP_LOAD_IRKS handler.
 * Validates the count against the declared payload length and each
 * entry's address, then atomically replaces the SMP IRK store.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps sizeof(*cp) + count * entry_size within u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front so the store is replaced all-or-
	 * nothing.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
/* Validate one LTK entry from user space: master must be a boolean
 * value, and a random LE address must be static (top two bits set).
 */
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 * Validates the count/length and each key, clears the current SMP LTK
 * store and repopulates it. Unknown key types (including P-256 debug
 * keys) are silently skipped rather than rejected.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps sizeof(*cp) + count * entry_size within u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before touching the existing key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through: debug keys are never stored */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
/* Completion helper for Get Connection Information: builds the reply
 * from the cached hci_conn values (or "invalid" markers on failure) and
 * releases the connection references taken in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the mgmt_addr_info of the request. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and reference acquired when the command was queued. */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
/* HCI request completion callback for the RSSI / TX power refresh
 * issued by get_conn_info(). Resolves the connection from the last sent
 * command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_GET_CONN_INFO handler.
 * Replies from the cached per-connection RSSI/TX power values when they
 * are recent enough; otherwise queues Read RSSI (and, as needed, Read
 * Transmit Power Level) and defers the reply to
 * conn_info_refresh_complete() / conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the reply is sent; both
		 * references are released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion helper for Get Clock Information: fills in the local and
 * (when a connection was involved) piconet clock values and releases
 * the connection references taken in get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param starts with the mgmt_addr_info of the request. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
/* HCI request completion callback for Read Clock. Resolves the
 * connection (if a piconet clock was requested) and completes the
 * matching pending Get Clock Information command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means a piconet clock read, carrying a conn handle. */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only).
 * Always reads the local clock; when a peer address is given, also
 * reads the piconet clock of that connection. The reply is produced
 * by get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific ACL connection. */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00, no handle). */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are released in clock_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Return true when an LE connection to addr (of the given address type)
 * exists and is fully established.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}
/* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for addr and move them
 * onto the action list matching the requested auto-connect policy.
 * Returns 0 on success, negative errno on allocation failure.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry was on. */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
/* Emit the Device Added mgmt event to all sockets except the
 * originating one (sk is passed as the skip-socket).
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
/* MGMT_OP_ADD_DEVICE handler.
 * action: 0x00 background scan (report), 0x01 allow incoming / direct
 * connect, 0x02 auto-connect. BR/EDR addresses go on the whitelist;
 * LE addresses get connection parameters with the matching policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Emit the Device Removed mgmt event to all sockets except the
 * originating one (sk is passed as the skip-socket).
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
/* MGMT_OP_REMOVE_DEVICE handler.
 * A specific address removes that entry (whitelist for BR/EDR,
 * connection parameters for LE); BDADDR_ANY with type 0 removes every
 * whitelist entry and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0. */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Devices being actively connected keep their entry
			 * but fall back to explicit-connect only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_LOAD_CONN_PARAM handler.
 * Clears disabled connection-parameter entries and loads the supplied
 * LE connection parameters. Individual invalid entries are skipped
 * with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeps sizeof(*cp) + count * entry_size within u16. */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 * Toggles HCI_EXT_CONFIGURED on controllers with the external-config
 * quirk and, when the configured/unconfigured state flips as a result,
 * re-registers the management index accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The configured state flipped: swap the index between the
	 * configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5533 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
5534 void *data
, u16 len
)
5536 struct mgmt_cp_set_public_address
*cp
= data
;
5540 BT_DBG("%s", hdev
->name
);
5542 if (hdev_is_powered(hdev
))
5543 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5544 MGMT_STATUS_REJECTED
);
5546 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
5547 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5548 MGMT_STATUS_INVALID_PARAMS
);
5550 if (!hdev
->set_bdaddr
)
5551 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
5552 MGMT_STATUS_NOT_SUPPORTED
);
5556 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
5557 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
5559 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
5566 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
5567 err
= new_options(hdev
, sk
);
5569 if (is_configured(hdev
)) {
5570 mgmt_index_removed(hdev
);
5572 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
5574 hci_dev_set_flag(hdev
, HCI_CONFIG
);
5575 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
5577 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
5581 hci_dev_unlock(hdev
);
5585 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
5586 u16 opcode
, struct sk_buff
*skb
)
5588 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
5589 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
5590 u8
*h192
, *r192
, *h256
, *r256
;
5591 struct mgmt_pending_cmd
*cmd
;
5595 BT_DBG("%s status %u", hdev
->name
, status
);
5597 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
5601 mgmt_cp
= cmd
->param
;
5604 status
= mgmt_status(status
);
5611 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
5612 struct hci_rp_read_local_oob_data
*rp
;
5614 if (skb
->len
!= sizeof(*rp
)) {
5615 status
= MGMT_STATUS_FAILED
;
5618 status
= MGMT_STATUS_SUCCESS
;
5619 rp
= (void *)skb
->data
;
5621 eir_len
= 5 + 18 + 18;
5628 struct hci_rp_read_local_oob_ext_data
*rp
;
5630 if (skb
->len
!= sizeof(*rp
)) {
5631 status
= MGMT_STATUS_FAILED
;
5634 status
= MGMT_STATUS_SUCCESS
;
5635 rp
= (void *)skb
->data
;
5637 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5638 eir_len
= 5 + 18 + 18;
5642 eir_len
= 5 + 18 + 18 + 18 + 18;
5652 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
5659 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
5660 hdev
->dev_class
, 3);
5663 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5664 EIR_SSP_HASH_C192
, h192
, 16);
5665 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5666 EIR_SSP_RAND_R192
, r192
, 16);
5670 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5671 EIR_SSP_HASH_C256
, h256
, 16);
5672 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5673 EIR_SSP_RAND_R256
, r256
, 16);
5677 mgmt_rp
->type
= mgmt_cp
->type
;
5678 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
5680 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
5681 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
5682 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
5683 if (err
< 0 || status
)
5686 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
5688 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
5689 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
5690 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
5693 mgmt_pending_remove(cmd
);
5696 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
5697 struct mgmt_cp_read_local_oob_ext_data
*cp
)
5699 struct mgmt_pending_cmd
*cmd
;
5700 struct hci_request req
;
5703 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
5708 hci_req_init(&req
, hdev
);
5710 if (bredr_sc_enabled(hdev
))
5711 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
5713 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
5715 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
5717 mgmt_pending_remove(cmd
);
5724 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
5725 void *data
, u16 data_len
)
5727 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
5728 struct mgmt_rp_read_local_oob_ext_data
*rp
;
5731 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
5734 BT_DBG("%s", hdev
->name
);
5736 if (hdev_is_powered(hdev
)) {
5738 case BIT(BDADDR_BREDR
):
5739 status
= mgmt_bredr_support(hdev
);
5745 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
5746 status
= mgmt_le_support(hdev
);
5750 eir_len
= 9 + 3 + 18 + 18 + 3;
5753 status
= MGMT_STATUS_INVALID_PARAMS
;
5758 status
= MGMT_STATUS_NOT_POWERED
;
5762 rp_len
= sizeof(*rp
) + eir_len
;
5763 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
5774 case BIT(BDADDR_BREDR
):
5775 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5776 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
5777 hci_dev_unlock(hdev
);
5781 status
= MGMT_STATUS_FAILED
;
5784 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5786 hdev
->dev_class
, 3);
5789 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
5790 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5791 smp_generate_oob(hdev
, hash
, rand
) < 0) {
5792 hci_dev_unlock(hdev
);
5793 status
= MGMT_STATUS_FAILED
;
5797 /* This should return the active RPA, but since the RPA
5798 * is only programmed on demand, it is really hard to fill
5799 * this in at the moment. For now disallow retrieving
5800 * local out-of-band data when privacy is in use.
5802 * Returning the identity address will not help here since
5803 * pairing happens before the identity resolving key is
5804 * known and thus the connection establishment happens
5805 * based on the RPA and not the identity address.
5807 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
5808 hci_dev_unlock(hdev
);
5809 status
= MGMT_STATUS_REJECTED
;
5813 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
5814 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
5815 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5816 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
5817 memcpy(addr
, &hdev
->static_addr
, 6);
5820 memcpy(addr
, &hdev
->bdaddr
, 6);
5824 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
5825 addr
, sizeof(addr
));
5827 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
5832 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
5833 &role
, sizeof(role
));
5835 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
5836 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5838 hash
, sizeof(hash
));
5840 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5842 rand
, sizeof(rand
));
5845 flags
= mgmt_get_adv_discov_flags(hdev
);
5847 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
5848 flags
|= LE_AD_NO_BREDR
;
5850 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
5851 &flags
, sizeof(flags
));
5855 hci_dev_unlock(hdev
);
5857 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
5859 status
= MGMT_STATUS_SUCCESS
;
5862 rp
->type
= cp
->type
;
5863 rp
->eir_len
= cpu_to_le16(eir_len
);
5865 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
5866 status
, rp
, sizeof(*rp
) + eir_len
);
5867 if (err
< 0 || status
)
5870 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
5871 rp
, sizeof(*rp
) + eir_len
,
5872 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
5880 static u32
get_supported_adv_flags(struct hci_dev
*hdev
)
5884 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
5885 flags
|= MGMT_ADV_FLAG_DISCOV
;
5886 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
5887 flags
|= MGMT_ADV_FLAG_MANAGED_FLAGS
;
5889 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
)
5890 flags
|= MGMT_ADV_FLAG_TX_POWER
;
5895 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
5896 void *data
, u16 data_len
)
5898 struct mgmt_rp_read_adv_features
*rp
;
5901 struct adv_info
*adv_instance
;
5902 u32 supported_flags
;
5905 BT_DBG("%s", hdev
->name
);
5907 if (!lmp_le_capable(hdev
))
5908 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
5909 MGMT_STATUS_REJECTED
);
5913 rp_len
= sizeof(*rp
) + hdev
->adv_instance_cnt
;
5914 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
5916 hci_dev_unlock(hdev
);
5920 supported_flags
= get_supported_adv_flags(hdev
);
5922 rp
->supported_flags
= cpu_to_le32(supported_flags
);
5923 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
5924 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
5925 rp
->max_instances
= HCI_MAX_ADV_INSTANCES
;
5926 rp
->num_instances
= hdev
->adv_instance_cnt
;
5928 instance
= rp
->instance
;
5929 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
5930 *instance
= adv_instance
->instance
;
5934 hci_dev_unlock(hdev
);
5936 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
5937 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
5944 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
5945 u8 len
, bool is_adv_data
)
5947 u8 max_len
= HCI_MAX_AD_LENGTH
;
5949 bool flags_managed
= false;
5950 bool tx_power_managed
= false;
5953 if (adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
5954 MGMT_ADV_FLAG_LIMITED_DISCOV
|
5955 MGMT_ADV_FLAG_MANAGED_FLAGS
)) {
5956 flags_managed
= true;
5960 if (adv_flags
& MGMT_ADV_FLAG_TX_POWER
) {
5961 tx_power_managed
= true;
5969 /* Make sure that the data is correctly formatted. */
5970 for (i
= 0, cur_len
= 0; i
< len
; i
+= (cur_len
+ 1)) {
5973 if (flags_managed
&& data
[i
+ 1] == EIR_FLAGS
)
5976 if (tx_power_managed
&& data
[i
+ 1] == EIR_TX_POWER
)
5979 /* If the current field length would exceed the total data
5980 * length, then it's invalid.
5982 if (i
+ cur_len
>= len
)
5989 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
5992 struct mgmt_pending_cmd
*cmd
;
5993 struct mgmt_cp_add_advertising
*cp
;
5994 struct mgmt_rp_add_advertising rp
;
5995 struct adv_info
*adv_instance
, *n
;
5998 BT_DBG("status %d", status
);
6002 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
6004 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
6005 if (!adv_instance
->pending
)
6009 adv_instance
->pending
= false;
6013 instance
= adv_instance
->instance
;
6015 if (hdev
->cur_adv_instance
== instance
)
6016 cancel_adv_timeout(hdev
);
6018 hci_remove_adv_instance(hdev
, instance
);
6019 mgmt_advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
6026 rp
.instance
= cp
->instance
;
6029 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6030 mgmt_status(status
));
6032 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6033 mgmt_status(status
), &rp
, sizeof(rp
));
6035 mgmt_pending_remove(cmd
);
6038 hci_dev_unlock(hdev
);
6041 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6042 void *data
, u16 data_len
)
6044 struct mgmt_cp_add_advertising
*cp
= data
;
6045 struct mgmt_rp_add_advertising rp
;
6047 u32 supported_flags
;
6049 u16 timeout
, duration
;
6050 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
6051 u8 schedule_instance
= 0;
6052 struct adv_info
*next_instance
;
6054 struct mgmt_pending_cmd
*cmd
;
6055 struct hci_request req
;
6057 BT_DBG("%s", hdev
->name
);
6059 status
= mgmt_le_support(hdev
);
6061 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6064 if (cp
->instance
< 1 || cp
->instance
> HCI_MAX_ADV_INSTANCES
)
6065 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6066 MGMT_STATUS_INVALID_PARAMS
);
6068 if (data_len
!= sizeof(*cp
) + cp
->adv_data_len
+ cp
->scan_rsp_len
)
6069 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6070 MGMT_STATUS_INVALID_PARAMS
);
6072 flags
= __le32_to_cpu(cp
->flags
);
6073 timeout
= __le16_to_cpu(cp
->timeout
);
6074 duration
= __le16_to_cpu(cp
->duration
);
6076 /* The current implementation only supports a subset of the specified
6079 supported_flags
= get_supported_adv_flags(hdev
);
6080 if (flags
& ~supported_flags
)
6081 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6082 MGMT_STATUS_INVALID_PARAMS
);
6086 if (timeout
&& !hdev_is_powered(hdev
)) {
6087 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6088 MGMT_STATUS_REJECTED
);
6092 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
6093 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
6094 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6095 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6100 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
6101 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
6102 cp
->scan_rsp_len
, false)) {
6103 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6104 MGMT_STATUS_INVALID_PARAMS
);
6108 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
6109 cp
->adv_data_len
, cp
->data
,
6111 cp
->data
+ cp
->adv_data_len
,
6114 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6115 MGMT_STATUS_FAILED
);
6119 /* Only trigger an advertising added event if a new instance was
6122 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
6123 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
6125 if (hdev
->cur_adv_instance
== cp
->instance
) {
6126 /* If the currently advertised instance is being changed then
6127 * cancel the current advertising and schedule the next
6128 * instance. If there is only one instance then the overridden
6129 * advertising data will be visible right away.
6131 cancel_adv_timeout(hdev
);
6133 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
6135 schedule_instance
= next_instance
->instance
;
6136 } else if (!hdev
->adv_instance_timeout
) {
6137 /* Immediately advertise the new instance if no other
6138 * instance is currently being advertised.
6140 schedule_instance
= cp
->instance
;
6143 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6144 * there is no instance to be advertised then we have no HCI
6145 * communication to make. Simply return.
6147 if (!hdev_is_powered(hdev
) ||
6148 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
6149 !schedule_instance
) {
6150 rp
.instance
= cp
->instance
;
6151 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6152 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6156 /* We're good to go, update advertising data, parameters, and start
6159 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
6166 hci_req_init(&req
, hdev
);
6168 err
= __hci_req_schedule_adv_instance(&req
, schedule_instance
, true);
6171 err
= hci_req_run(&req
, add_advertising_complete
);
6174 mgmt_pending_remove(cmd
);
6177 hci_dev_unlock(hdev
);
6182 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
6185 struct mgmt_pending_cmd
*cmd
;
6186 struct mgmt_cp_remove_advertising
*cp
;
6187 struct mgmt_rp_remove_advertising rp
;
6189 BT_DBG("status %d", status
);
6193 /* A failure status here only means that we failed to disable
6194 * advertising. Otherwise, the advertising instance has been removed,
6195 * so report success.
6197 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
6202 rp
.instance
= cp
->instance
;
6204 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
6206 mgmt_pending_remove(cmd
);
6209 hci_dev_unlock(hdev
);
6212 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6213 void *data
, u16 data_len
)
6215 struct mgmt_cp_remove_advertising
*cp
= data
;
6216 struct mgmt_rp_remove_advertising rp
;
6217 struct mgmt_pending_cmd
*cmd
;
6218 struct hci_request req
;
6221 BT_DBG("%s", hdev
->name
);
6225 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
6226 err
= mgmt_cmd_status(sk
, hdev
->id
,
6227 MGMT_OP_REMOVE_ADVERTISING
,
6228 MGMT_STATUS_INVALID_PARAMS
);
6232 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
6233 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
6234 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6235 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
6240 if (list_empty(&hdev
->adv_instances
)) {
6241 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
6242 MGMT_STATUS_INVALID_PARAMS
);
6246 hci_req_init(&req
, hdev
);
6248 hci_req_clear_adv_instance(hdev
, sk
, &req
, cp
->instance
, true);
6250 if (list_empty(&hdev
->adv_instances
))
6251 __hci_req_disable_advertising(&req
);
6253 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
6254 * flag is set or the device isn't powered then we have no HCI
6255 * communication to make. Simply return.
6257 if (skb_queue_empty(&req
.cmd_q
) ||
6258 !hdev_is_powered(hdev
) ||
6259 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
6260 rp
.instance
= cp
->instance
;
6261 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6262 MGMT_OP_REMOVE_ADVERTISING
,
6263 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6267 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
6274 err
= hci_req_run(&req
, remove_advertising_complete
);
6276 mgmt_pending_remove(cmd
);
6279 hci_dev_unlock(hdev
);
6284 static u8
tlv_data_max_len(u32 adv_flags
, bool is_adv_data
)
6286 u8 max_len
= HCI_MAX_AD_LENGTH
;
6289 if (adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
6290 MGMT_ADV_FLAG_LIMITED_DISCOV
|
6291 MGMT_ADV_FLAG_MANAGED_FLAGS
))
6294 if (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)
6301 static int get_adv_size_info(struct sock
*sk
, struct hci_dev
*hdev
,
6302 void *data
, u16 data_len
)
6304 struct mgmt_cp_get_adv_size_info
*cp
= data
;
6305 struct mgmt_rp_get_adv_size_info rp
;
6306 u32 flags
, supported_flags
;
6309 BT_DBG("%s", hdev
->name
);
6311 if (!lmp_le_capable(hdev
))
6312 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
6313 MGMT_STATUS_REJECTED
);
6315 if (cp
->instance
< 1 || cp
->instance
> HCI_MAX_ADV_INSTANCES
)
6316 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
6317 MGMT_STATUS_INVALID_PARAMS
);
6319 flags
= __le32_to_cpu(cp
->flags
);
6321 /* The current implementation only supports a subset of the specified
6324 supported_flags
= get_supported_adv_flags(hdev
);
6325 if (flags
& ~supported_flags
)
6326 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
6327 MGMT_STATUS_INVALID_PARAMS
);
6329 rp
.instance
= cp
->instance
;
6330 rp
.flags
= cp
->flags
;
6331 rp
.max_adv_data_len
= tlv_data_max_len(flags
, true);
6332 rp
.max_scan_rsp_len
= tlv_data_max_len(flags
, false);
6334 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
6335 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6340 static const struct hci_mgmt_handler mgmt_handlers
[] = {
6341 { NULL
}, /* 0x0000 (no command) */
6342 { read_version
, MGMT_READ_VERSION_SIZE
,
6344 HCI_MGMT_UNTRUSTED
},
6345 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
6347 HCI_MGMT_UNTRUSTED
},
6348 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
6350 HCI_MGMT_UNTRUSTED
},
6351 { read_controller_info
, MGMT_READ_INFO_SIZE
,
6352 HCI_MGMT_UNTRUSTED
},
6353 { set_powered
, MGMT_SETTING_SIZE
},
6354 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
6355 { set_connectable
, MGMT_SETTING_SIZE
},
6356 { set_fast_connectable
, MGMT_SETTING_SIZE
},
6357 { set_bondable
, MGMT_SETTING_SIZE
},
6358 { set_link_security
, MGMT_SETTING_SIZE
},
6359 { set_ssp
, MGMT_SETTING_SIZE
},
6360 { set_hs
, MGMT_SETTING_SIZE
},
6361 { set_le
, MGMT_SETTING_SIZE
},
6362 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
6363 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
6364 { add_uuid
, MGMT_ADD_UUID_SIZE
},
6365 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
6366 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
6368 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
6370 { disconnect
, MGMT_DISCONNECT_SIZE
},
6371 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
6372 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
6373 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
6374 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
6375 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
6376 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
6377 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
6378 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
6379 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
6380 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
6381 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
6382 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
6383 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
6385 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
6386 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
6387 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
6388 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
6389 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
6390 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
6391 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
6392 { set_advertising
, MGMT_SETTING_SIZE
},
6393 { set_bredr
, MGMT_SETTING_SIZE
},
6394 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
6395 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
6396 { set_secure_conn
, MGMT_SETTING_SIZE
},
6397 { set_debug_keys
, MGMT_SETTING_SIZE
},
6398 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
6399 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
6401 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
6402 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
6403 { add_device
, MGMT_ADD_DEVICE_SIZE
},
6404 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
6405 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
6407 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
6409 HCI_MGMT_UNTRUSTED
},
6410 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
6411 HCI_MGMT_UNCONFIGURED
|
6412 HCI_MGMT_UNTRUSTED
},
6413 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
6414 HCI_MGMT_UNCONFIGURED
},
6415 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
6416 HCI_MGMT_UNCONFIGURED
},
6417 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
6419 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
6420 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
6422 HCI_MGMT_UNTRUSTED
},
6423 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
6424 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
6426 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
6427 { get_adv_size_info
, MGMT_GET_ADV_SIZE_INFO_SIZE
},
6428 { start_limited_discovery
, MGMT_START_DISCOVERY_SIZE
},
6429 { read_ext_controller_info
,MGMT_READ_EXT_INFO_SIZE
,
6430 HCI_MGMT_UNTRUSTED
},
6433 void mgmt_index_added(struct hci_dev
*hdev
)
6435 struct mgmt_ev_ext_index ev
;
6437 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6440 switch (hdev
->dev_type
) {
6442 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
6443 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
,
6444 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
6447 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
6448 HCI_MGMT_INDEX_EVENTS
);
6461 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
6462 HCI_MGMT_EXT_INDEX_EVENTS
);
6465 void mgmt_index_removed(struct hci_dev
*hdev
)
6467 struct mgmt_ev_ext_index ev
;
6468 u8 status
= MGMT_STATUS_INVALID_INDEX
;
6470 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6473 switch (hdev
->dev_type
) {
6475 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
6477 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
6478 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
,
6479 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
6482 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
6483 HCI_MGMT_INDEX_EVENTS
);
6496 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
6497 HCI_MGMT_EXT_INDEX_EVENTS
);
6500 /* This function requires the caller holds hdev->lock */
6501 static void restart_le_actions(struct hci_dev
*hdev
)
6503 struct hci_conn_params
*p
;
6505 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
6506 /* Needed for AUTO_OFF case where might not "really"
6507 * have been powered off.
6509 list_del_init(&p
->action
);
6511 switch (p
->auto_connect
) {
6512 case HCI_AUTO_CONN_DIRECT
:
6513 case HCI_AUTO_CONN_ALWAYS
:
6514 list_add(&p
->action
, &hdev
->pend_le_conns
);
6516 case HCI_AUTO_CONN_REPORT
:
6517 list_add(&p
->action
, &hdev
->pend_le_reports
);
6525 void mgmt_power_on(struct hci_dev
*hdev
, int err
)
6527 struct cmd_lookup match
= { NULL
, hdev
};
6529 BT_DBG("err %d", err
);
6534 restart_le_actions(hdev
);
6535 hci_update_background_scan(hdev
);
6538 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
6540 new_settings(hdev
, match
.sk
);
6545 hci_dev_unlock(hdev
);
6548 void __mgmt_power_off(struct hci_dev
*hdev
)
6550 struct cmd_lookup match
= { NULL
, hdev
};
6551 u8 status
, zero_cod
[] = { 0, 0, 0 };
6553 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
6555 /* If the power off is because of hdev unregistration let
6556 * use the appropriate INVALID_INDEX status. Otherwise use
6557 * NOT_POWERED. We cover both scenarios here since later in
6558 * mgmt_index_removed() any hci_conn callbacks will have already
6559 * been triggered, potentially causing misleading DISCONNECTED
6562 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
6563 status
= MGMT_STATUS_INVALID_INDEX
;
6565 status
= MGMT_STATUS_NOT_POWERED
;
6567 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
6569 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0) {
6570 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
6571 zero_cod
, sizeof(zero_cod
),
6572 HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
6573 ext_info_changed(hdev
, NULL
);
6576 new_settings(hdev
, match
.sk
);
6582 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
6584 struct mgmt_pending_cmd
*cmd
;
6587 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
6591 if (err
== -ERFKILL
)
6592 status
= MGMT_STATUS_RFKILLED
;
6594 status
= MGMT_STATUS_FAILED
;
6596 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
6598 mgmt_pending_remove(cmd
);
6601 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
6604 struct mgmt_ev_new_link_key ev
;
6606 memset(&ev
, 0, sizeof(ev
));
6608 ev
.store_hint
= persistent
;
6609 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
6610 ev
.key
.addr
.type
= BDADDR_BREDR
;
6611 ev
.key
.type
= key
->type
;
6612 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
6613 ev
.key
.pin_len
= key
->pin_len
;
6615 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
6618 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
6620 switch (ltk
->type
) {
6623 if (ltk
->authenticated
)
6624 return MGMT_LTK_AUTHENTICATED
;
6625 return MGMT_LTK_UNAUTHENTICATED
;
6627 if (ltk
->authenticated
)
6628 return MGMT_LTK_P256_AUTH
;
6629 return MGMT_LTK_P256_UNAUTH
;
6630 case SMP_LTK_P256_DEBUG
:
6631 return MGMT_LTK_P256_DEBUG
;
6634 return MGMT_LTK_UNAUTHENTICATED
;
6637 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
6639 struct mgmt_ev_new_long_term_key ev
;
6641 memset(&ev
, 0, sizeof(ev
));
6643 /* Devices using resolvable or non-resolvable random addresses
6644 * without providing an identity resolving key don't require
6645 * to store long term keys. Their addresses will change the
6648 * Only when a remote device provides an identity address
6649 * make sure the long term key is stored. If the remote
6650 * identity is known, the long term keys are internally
6651 * mapped to the identity address. So allow static random
6652 * and public addresses here.
6654 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6655 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6656 ev
.store_hint
= 0x00;
6658 ev
.store_hint
= persistent
;
6660 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
6661 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
6662 ev
.key
.type
= mgmt_ltk_type(key
);
6663 ev
.key
.enc_size
= key
->enc_size
;
6664 ev
.key
.ediv
= key
->ediv
;
6665 ev
.key
.rand
= key
->rand
;
6667 if (key
->type
== SMP_LTK
)
6670 /* Make sure we copy only the significant bytes based on the
6671 * encryption key size, and set the rest of the value to zeroes.
6673 memcpy(ev
.key
.val
, key
->val
, key
->enc_size
);
6674 memset(ev
.key
.val
+ key
->enc_size
, 0,
6675 sizeof(ev
.key
.val
) - key
->enc_size
);
6677 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
6680 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
, bool persistent
)
6682 struct mgmt_ev_new_irk ev
;
6684 memset(&ev
, 0, sizeof(ev
));
6686 ev
.store_hint
= persistent
;
6688 bacpy(&ev
.rpa
, &irk
->rpa
);
6689 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
6690 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
6691 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
6693 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6696 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
6699 struct mgmt_ev_new_csrk ev
;
6701 memset(&ev
, 0, sizeof(ev
));
6703 /* Devices using resolvable or non-resolvable random addresses
6704 * without providing an identity resolving key don't require
6705 * to store signature resolving keys. Their addresses will change
6706 * the next time around.
6708 * Only when a remote device provides an identity address
6709 * make sure the signature resolving key is stored. So allow
6710 * static random and public addresses here.
6712 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6713 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6714 ev
.store_hint
= 0x00;
6716 ev
.store_hint
= persistent
;
6718 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
6719 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
6720 ev
.key
.type
= csrk
->type
;
6721 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
6723 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6726 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6727 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
6728 u16 max_interval
, u16 latency
, u16 timeout
)
6730 struct mgmt_ev_new_conn_param ev
;
6732 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
6735 memset(&ev
, 0, sizeof(ev
));
6736 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6737 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
6738 ev
.store_hint
= store_hint
;
6739 ev
.min_interval
= cpu_to_le16(min_interval
);
6740 ev
.max_interval
= cpu_to_le16(max_interval
);
6741 ev
.latency
= cpu_to_le16(latency
);
6742 ev
.timeout
= cpu_to_le16(timeout
);
6744 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
6747 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
6748 u32 flags
, u8
*name
, u8 name_len
)
6751 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
6754 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
6755 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
6757 ev
->flags
= __cpu_to_le32(flags
);
6759 /* We must ensure that the EIR Data fields are ordered and
6760 * unique. Keep it simple for now and avoid the problem by not
6761 * adding any BR/EDR data to the LE adv.
6763 if (conn
->le_adv_data_len
> 0) {
6764 memcpy(&ev
->eir
[eir_len
],
6765 conn
->le_adv_data
, conn
->le_adv_data_len
);
6766 eir_len
= conn
->le_adv_data_len
;
6769 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
6772 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
6773 eir_len
= eir_append_data(ev
->eir
, eir_len
,
6775 conn
->dev_class
, 3);
6778 ev
->eir_len
= cpu_to_le16(eir_len
);
6780 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
6781 sizeof(*ev
) + eir_len
, NULL
);
6784 static void disconnect_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
6786 struct sock
**sk
= data
;
6788 cmd
->cmd_complete(cmd
, 0);
6793 mgmt_pending_remove(cmd
);
6796 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
6798 struct hci_dev
*hdev
= data
;
6799 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
6801 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
6803 cmd
->cmd_complete(cmd
, 0);
6804 mgmt_pending_remove(cmd
);
6807 bool mgmt_powering_down(struct hci_dev
*hdev
)
6809 struct mgmt_pending_cmd
*cmd
;
6810 struct mgmt_mode
*cp
;
6812 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
6823 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6824 u8 link_type
, u8 addr_type
, u8 reason
,
6825 bool mgmt_connected
)
6827 struct mgmt_ev_device_disconnected ev
;
6828 struct sock
*sk
= NULL
;
6830 /* The connection is still in hci_conn_hash so test for 1
6831 * instead of 0 to know if this is the last one.
6833 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
6834 cancel_delayed_work(&hdev
->power_off
);
6835 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6838 if (!mgmt_connected
)
6841 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
6844 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
6846 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6847 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6850 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
6855 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6859 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6860 u8 link_type
, u8 addr_type
, u8 status
)
6862 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
6863 struct mgmt_cp_disconnect
*cp
;
6864 struct mgmt_pending_cmd
*cmd
;
6866 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6869 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
6875 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
6878 if (cp
->addr
.type
!= bdaddr_type
)
6881 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6882 mgmt_pending_remove(cmd
);
6885 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6886 u8 addr_type
, u8 status
)
6888 struct mgmt_ev_connect_failed ev
;
6890 /* The connection is still in hci_conn_hash so test for 1
6891 * instead of 0 to know if this is the last one.
6893 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
6894 cancel_delayed_work(&hdev
->power_off
);
6895 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6898 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6899 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6900 ev
.status
= mgmt_status(status
);
6902 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
6905 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
6907 struct mgmt_ev_pin_code_request ev
;
6909 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6910 ev
.addr
.type
= BDADDR_BREDR
;
6913 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
6916 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6919 struct mgmt_pending_cmd
*cmd
;
6921 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
6925 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6926 mgmt_pending_remove(cmd
);
6929 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6932 struct mgmt_pending_cmd
*cmd
;
6934 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
6938 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6939 mgmt_pending_remove(cmd
);
6942 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6943 u8 link_type
, u8 addr_type
, u32 value
,
6946 struct mgmt_ev_user_confirm_request ev
;
6948 BT_DBG("%s", hdev
->name
);
6950 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6951 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6952 ev
.confirm_hint
= confirm_hint
;
6953 ev
.value
= cpu_to_le32(value
);
6955 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
6959 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6960 u8 link_type
, u8 addr_type
)
6962 struct mgmt_ev_user_passkey_request ev
;
6964 BT_DBG("%s", hdev
->name
);
6966 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6967 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6969 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
6973 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6974 u8 link_type
, u8 addr_type
, u8 status
,
6977 struct mgmt_pending_cmd
*cmd
;
6979 cmd
= pending_find(opcode
, hdev
);
6983 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6984 mgmt_pending_remove(cmd
);
6989 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6990 u8 link_type
, u8 addr_type
, u8 status
)
6992 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6993 status
, MGMT_OP_USER_CONFIRM_REPLY
);
6996 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6997 u8 link_type
, u8 addr_type
, u8 status
)
6999 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7001 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
7004 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7005 u8 link_type
, u8 addr_type
, u8 status
)
7007 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7008 status
, MGMT_OP_USER_PASSKEY_REPLY
);
7011 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7012 u8 link_type
, u8 addr_type
, u8 status
)
7014 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7016 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
7019 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7020 u8 link_type
, u8 addr_type
, u32 passkey
,
7023 struct mgmt_ev_passkey_notify ev
;
7025 BT_DBG("%s", hdev
->name
);
7027 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7028 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7029 ev
.passkey
= __cpu_to_le32(passkey
);
7030 ev
.entered
= entered
;
7032 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
7035 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
7037 struct mgmt_ev_auth_failed ev
;
7038 struct mgmt_pending_cmd
*cmd
;
7039 u8 status
= mgmt_status(hci_status
);
7041 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
7042 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7045 cmd
= find_pairing(conn
);
7047 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
7048 cmd
? cmd
->sk
: NULL
);
7051 cmd
->cmd_complete(cmd
, status
);
7052 mgmt_pending_remove(cmd
);
7056 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
7058 struct cmd_lookup match
= { NULL
, hdev
};
7062 u8 mgmt_err
= mgmt_status(status
);
7063 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
7064 cmd_status_rsp
, &mgmt_err
);
7068 if (test_bit(HCI_AUTH
, &hdev
->flags
))
7069 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
7071 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
7073 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
7077 new_settings(hdev
, match
.sk
);
7083 static void clear_eir(struct hci_request
*req
)
7085 struct hci_dev
*hdev
= req
->hdev
;
7086 struct hci_cp_write_eir cp
;
7088 if (!lmp_ext_inq_capable(hdev
))
7091 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
7093 memset(&cp
, 0, sizeof(cp
));
7095 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
7098 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
7100 struct cmd_lookup match
= { NULL
, hdev
};
7101 struct hci_request req
;
7102 bool changed
= false;
7105 u8 mgmt_err
= mgmt_status(status
);
7107 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
7109 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7110 new_settings(hdev
, NULL
);
7113 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
7119 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
7121 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
7123 changed
= hci_dev_test_and_clear_flag(hdev
,
7126 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7129 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
7132 new_settings(hdev
, match
.sk
);
7137 hci_req_init(&req
, hdev
);
7139 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
7140 if (hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
7141 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
7142 sizeof(enable
), &enable
);
7143 __hci_req_update_eir(&req
);
7148 hci_req_run(&req
, NULL
);
7151 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
7153 struct cmd_lookup
*match
= data
;
7155 if (match
->sk
== NULL
) {
7156 match
->sk
= cmd
->sk
;
7157 sock_hold(match
->sk
);
7161 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
7164 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
7166 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
7167 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
7168 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
7171 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
,
7172 3, HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
7173 ext_info_changed(hdev
, NULL
);
7180 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
7182 struct mgmt_cp_set_local_name ev
;
7183 struct mgmt_pending_cmd
*cmd
;
7188 memset(&ev
, 0, sizeof(ev
));
7189 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
7190 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
7192 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
7194 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
7196 /* If this is a HCI command related to powering on the
7197 * HCI dev don't send any mgmt signals.
7199 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
7203 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
7204 HCI_MGMT_LOCAL_NAME_EVENTS
, cmd
? cmd
->sk
: NULL
);
7205 ext_info_changed(hdev
, cmd
? cmd
->sk
: NULL
);
/* Return true if the 128-bit uuid appears in the uuids filter list. */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
7220 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
7224 while (parsed
< eir_len
) {
7225 u8 field_len
= eir
[0];
7232 if (eir_len
- parsed
< field_len
+ 1)
7236 case EIR_UUID16_ALL
:
7237 case EIR_UUID16_SOME
:
7238 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
7239 memcpy(uuid
, bluetooth_base_uuid
, 16);
7240 uuid
[13] = eir
[i
+ 3];
7241 uuid
[12] = eir
[i
+ 2];
7242 if (has_uuid(uuid
, uuid_count
, uuids
))
7246 case EIR_UUID32_ALL
:
7247 case EIR_UUID32_SOME
:
7248 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
7249 memcpy(uuid
, bluetooth_base_uuid
, 16);
7250 uuid
[15] = eir
[i
+ 5];
7251 uuid
[14] = eir
[i
+ 4];
7252 uuid
[13] = eir
[i
+ 3];
7253 uuid
[12] = eir
[i
+ 2];
7254 if (has_uuid(uuid
, uuid_count
, uuids
))
7258 case EIR_UUID128_ALL
:
7259 case EIR_UUID128_SOME
:
7260 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
7261 memcpy(uuid
, eir
+ i
+ 2, 16);
7262 if (has_uuid(uuid
, uuid_count
, uuids
))
7268 parsed
+= field_len
+ 1;
7269 eir
+= field_len
+ 1;
7275 static void restart_le_scan(struct hci_dev
*hdev
)
7277 /* If controller is not scanning we are done. */
7278 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
7281 if (time_after(jiffies
+ DISCOV_LE_RESTART_DELAY
,
7282 hdev
->discovery
.scan_start
+
7283 hdev
->discovery
.scan_duration
))
7286 queue_delayed_work(hdev
->req_workqueue
, &hdev
->le_scan_restart
,
7287 DISCOV_LE_RESTART_DELAY
);
7290 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
7291 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
7293 /* If a RSSI threshold has been specified, and
7294 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7295 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7296 * is set, let it through for further processing, as we might need to
7299 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7300 * the results are also dropped.
7302 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
7303 (rssi
== HCI_RSSI_INVALID
||
7304 (rssi
< hdev
->discovery
.rssi
&&
7305 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
7308 if (hdev
->discovery
.uuid_count
!= 0) {
7309 /* If a list of UUIDs is provided in filter, results with no
7310 * matching UUID should be dropped.
7312 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
7313 hdev
->discovery
.uuids
) &&
7314 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
7315 hdev
->discovery
.uuid_count
,
7316 hdev
->discovery
.uuids
))
7320 /* If duplicate filtering does not report RSSI changes, then restart
7321 * scanning to ensure updated result with updated RSSI values.
7323 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
7324 restart_le_scan(hdev
);
7326 /* Validate RSSI value against the RSSI threshold once more. */
7327 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
7328 rssi
< hdev
->discovery
.rssi
)
7335 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7336 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
7337 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
7340 struct mgmt_ev_device_found
*ev
= (void *)buf
;
7343 /* Don't send events for a non-kernel initiated discovery. With
7344 * LE one exception is if we have pend_le_reports > 0 in which
7345 * case we're doing passive scanning and want these events.
7347 if (!hci_discovery_active(hdev
)) {
7348 if (link_type
== ACL_LINK
)
7350 if (link_type
== LE_LINK
&& list_empty(&hdev
->pend_le_reports
))
7354 if (hdev
->discovery
.result_filtering
) {
7355 /* We are using service discovery */
7356 if (!is_filter_match(hdev
, rssi
, eir
, eir_len
, scan_rsp
,
7361 if (hdev
->discovery
.limited
) {
7362 /* Check for limited discoverable bit */
7364 if (!(dev_class
[1] & 0x20))
7367 u8
*flags
= eir_get_data(eir
, eir_len
, EIR_FLAGS
, NULL
);
7368 if (!flags
|| !(flags
[0] & LE_AD_LIMITED
))
7373 /* Make sure that the buffer is big enough. The 5 extra bytes
7374 * are for the potential CoD field.
7376 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
7379 memset(buf
, 0, sizeof(buf
));
7381 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7382 * RSSI value was reported as 0 when not available. This behavior
7383 * is kept when using device discovery. This is required for full
7384 * backwards compatibility with the API.
7386 * However when using service discovery, the value 127 will be
7387 * returned when the RSSI is not available.
7389 if (rssi
== HCI_RSSI_INVALID
&& !hdev
->discovery
.report_invalid_rssi
&&
7390 link_type
== ACL_LINK
)
7393 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
7394 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7396 ev
->flags
= cpu_to_le32(flags
);
7399 /* Copy EIR or advertising data into event */
7400 memcpy(ev
->eir
, eir
, eir_len
);
7402 if (dev_class
&& !eir_get_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
7404 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
7407 if (scan_rsp_len
> 0)
7408 /* Append scan response data to event */
7409 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
7411 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
7412 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
7414 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
7417 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7418 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
7420 struct mgmt_ev_device_found
*ev
;
7421 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
7424 ev
= (struct mgmt_ev_device_found
*) buf
;
7426 memset(buf
, 0, sizeof(buf
));
7428 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
7429 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7432 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
7435 ev
->eir_len
= cpu_to_le16(eir_len
);
7437 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
7440 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
7442 struct mgmt_ev_discovering ev
;
7444 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
7446 memset(&ev
, 0, sizeof(ev
));
7447 ev
.type
= hdev
->discovery
.type
;
7448 ev
.discovering
= discovering
;
7450 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
7453 static struct hci_mgmt_chan chan
= {
7454 .channel
= HCI_CHANNEL_CONTROL
,
7455 .handler_count
= ARRAY_SIZE(mgmt_handlers
),
7456 .handlers
= mgmt_handlers
,
7457 .hdev_init
= mgmt_init_hdev
,
7462 return hci_mgmt_chan_register(&chan
);
7465 void mgmt_exit(void)
7467 hci_mgmt_chan_unregister(&chan
);