Merge remote-tracking branch 'mfd/for-mfd-next'
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 13
42
/* Opcodes implemented by this management interface.  The list is sent
 * back verbatim by MGMT_OP_READ_COMMANDS to trusted sockets; keep it in
 * sync with the handler table.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
};
109
/* Events this interface may emit; reported by MGMT_OP_READ_COMMANDS to
 * trusted sockets alongside the command list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
147
/* Subset of commands permitted on untrusted (non-privileged) sockets;
 * all of them are read-only queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
156
/* Subset of events delivered to untrusted sockets; limited to index and
 * basic state-change notifications.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
170
171 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
172
173 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
174 "\x00\x00\x00\x00\x00\x00\x00\x00"
175
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; the comment on each entry
 * names the HCI error it translates.  Codes beyond the end of the table
 * are handled by mgmt_status() falling back to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
240
241 static u8 mgmt_status(u8 hci_status)
242 {
243 if (hci_status < ARRAY_SIZE(mgmt_status_table))
244 return mgmt_status_table[hci_status];
245
246 return MGMT_STATUS_FAILED;
247 }
248
249 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
250 u16 len, int flag)
251 {
252 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
253 flag, NULL);
254 }
255
256 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
257 u16 len, int flag, struct sock *skip_sk)
258 {
259 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
260 flag, skip_sk);
261 }
262
263 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
264 struct sock *skip_sk)
265 {
266 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
267 HCI_SOCK_TRUSTED, skip_sk);
268 }
269
static u8 le_addr_type(u8 mgmt_addr_type)
{
	/* mgmt distinguishes only public vs. random LE address types;
	 * everything that is not public is treated as random.
	 */
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
277
278 void mgmt_fill_version_info(void *ver)
279 {
280 struct mgmt_rp_read_version *rp = ver;
281
282 rp->version = MGMT_VERSION;
283 rp->revision = cpu_to_le16(MGMT_REVISION);
284 }
285
286 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
287 u16 data_len)
288 {
289 struct mgmt_rp_read_version rp;
290
291 BT_DBG("sock %p", sk);
292
293 mgmt_fill_version_info(&rp);
294
295 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
296 &rp, sizeof(rp));
297 }
298
/* Handle MGMT_OP_READ_COMMANDS: report the supported command and event
 * opcodes.  Untrusted sockets receive only the read-only subsets.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Reply layout: fixed header followed by all command opcodes, then
	 * all event opcodes, each as a little-endian u16.
	 */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		/* opcode walks straight from the command list into the
		 * event list; put_unaligned because the buffer has no
		 * alignment guarantee.
		 */
		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
350
/* Handle MGMT_OP_READ_INDEX_LIST: report the indexes of all configured
 * primary controllers.  The device list lock is held across both passes
 * so the count cannot change between sizing and filling the reply.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation size. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens under the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying the extra filters, so the
	 * final count may be smaller than the first pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller)
	 * count so no stale bytes are sent.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
410
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but for
 * primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass sizes the reply; the lock stays held so the list
	 * cannot change before the second pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because we are under the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass fills indexes with extra filtering applied. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
470
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report every usable controller
 * (primary and AMP) with a per-entry type and bus.  Calling this also
 * switches the socket over to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: still holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry types: 0x00 configured primary, 0x01 unconfigured
		 * primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length from the final count. */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546
547 static bool is_configured(struct hci_dev *hdev)
548 {
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
550 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
551 return false;
552
553 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
554 !bacmp(&hdev->public_addr, BDADDR_ANY))
555 return false;
556
557 return true;
558 }
559
560 static __le32 get_missing_options(struct hci_dev *hdev)
561 {
562 u32 options = 0;
563
564 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
565 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
566 options |= MGMT_OPTION_EXTERNAL_CONFIG;
567
568 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
569 !bacmp(&hdev->public_addr, BDADDR_ANY))
570 options |= MGMT_OPTION_PUBLIC_ADDRESS;
571
572 return cpu_to_le32(options);
573 }
574
575 static int new_options(struct hci_dev *hdev, struct sock *skip)
576 {
577 __le32 options = get_missing_options(hdev);
578
579 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
580 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
581 }
582
583 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
584 {
585 __le32 options = get_missing_options(hdev);
586
587 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
588 sizeof(options));
589 }
590
/* Handle MGMT_OP_READ_CONFIG_INFO: report supported and still-missing
 * configuration options for an unconfigured controller.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Supported: external config if the quirk says so, public address
	 * if the driver provides a set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
618
619 static u32 get_supported_settings(struct hci_dev *hdev)
620 {
621 u32 settings = 0;
622
623 settings |= MGMT_SETTING_POWERED;
624 settings |= MGMT_SETTING_BONDABLE;
625 settings |= MGMT_SETTING_DEBUG_KEYS;
626 settings |= MGMT_SETTING_CONNECTABLE;
627 settings |= MGMT_SETTING_DISCOVERABLE;
628
629 if (lmp_bredr_capable(hdev)) {
630 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
631 settings |= MGMT_SETTING_FAST_CONNECTABLE;
632 settings |= MGMT_SETTING_BREDR;
633 settings |= MGMT_SETTING_LINK_SECURITY;
634
635 if (lmp_ssp_capable(hdev)) {
636 settings |= MGMT_SETTING_SSP;
637 settings |= MGMT_SETTING_HS;
638 }
639
640 if (lmp_sc_capable(hdev))
641 settings |= MGMT_SETTING_SECURE_CONN;
642 }
643
644 if (lmp_le_capable(hdev)) {
645 settings |= MGMT_SETTING_LE;
646 settings |= MGMT_SETTING_ADVERTISING;
647 settings |= MGMT_SETTING_SECURE_CONN;
648 settings |= MGMT_SETTING_PRIVACY;
649 settings |= MGMT_SETTING_STATIC_ADDRESS;
650 }
651
652 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
653 hdev->set_bdaddr)
654 settings |= MGMT_SETTING_CONFIGURATION;
655
656 return settings;
657 }
658
/* Build the current-settings bitmap from the live device flags; this is
 * the value reported in settings replies and MGMT_EV_NEW_SETTINGS.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
726
727 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
728 {
729 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
730 }
731
732 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
733 struct hci_dev *hdev,
734 const void *data)
735 {
736 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
737 }
738
739 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
740 {
741 struct mgmt_pending_cmd *cmd;
742
743 /* If there's a pending mgmt command the flags will not yet have
744 * their final values, so check for this first.
745 */
746 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
747 if (cmd) {
748 struct mgmt_mode *cp = cmd->param;
749 if (cp->val == 0x01)
750 return LE_AD_GENERAL;
751 else if (cp->val == 0x02)
752 return LE_AD_LIMITED;
753 } else {
754 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
755 return LE_AD_LIMITED;
756 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
757 return LE_AD_GENERAL;
758 }
759
760 return 0;
761 }
762
763 bool mgmt_get_connectable(struct hci_dev *hdev)
764 {
765 struct mgmt_pending_cmd *cmd;
766
767 /* If there's a pending mgmt command the flag will not yet have
768 * it's final value, so check for this first.
769 */
770 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
771 if (cmd) {
772 struct mgmt_mode *cp = cmd->param;
773
774 return cp->val;
775 }
776
777 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
778 }
779
/* Delayed work: once the service cache window closes, push the real EIR
 * data and class of device to the controller in one HCI request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set; test-and-clear makes
	 * this idempotent against concurrent runs.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Queue the commands outside the device lock. */
	hci_req_run(&req, NULL);
}
800
/* Delayed work: the resolvable private address timed out; mark it
 * expired and, if advertising, refresh it by re-enabling advertising.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* When not advertising, the new RPA is generated lazily on the
	 * next use; nothing more to do here.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
822
/* One-time per-device mgmt initialization, triggered by the first mgmt
 * command that touches the device.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set guarantees the body runs exactly once per device. */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
838
/* Handle MGMT_OP_READ_INFO: report address, version, class, names and
 * the supported/current settings of one controller.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Lock so the snapshot of settings, class and names is coherent. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
868
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* Append one EIR field: a length octet covering type + payload,
	 * the type octet, then the payload.  Returns the new total length.
	 *
	 * NOTE(review): callers must guarantee the buffer has room for
	 * data_len + 2 more octets; there is no bounds check here.
	 */
	u8 *p = &eir[eir_len];

	*p++ = sizeof(type) + data_len;
	*p++ = type;
	memcpy(p, data, data_len);

	return eir_len + data_len + 2;
}
879
880 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
881 void *data, u16 data_len)
882 {
883 struct mgmt_rp_read_ext_info *rp;
884 char buff[512];
885 u16 eir_len = 0;
886 u8 name_len;
887
888 BT_DBG("sock %p %s", sk, hdev->name);
889
890 hci_dev_lock(hdev);
891
892 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
893 eir_len = eir_append_data(buff, eir_len,
894 EIR_CLASS_OF_DEV,
895 hdev->dev_class, 3);
896
897 name_len = strlen(hdev->dev_name);
898 eir_len = eir_append_data(buff, eir_len, EIR_NAME_COMPLETE,
899 hdev->dev_name, name_len);
900
901 name_len = strlen(hdev->short_name);
902 eir_len = eir_append_data(buff, eir_len, EIR_NAME_SHORT,
903 hdev->short_name, name_len);
904
905 rp = kzalloc(sizeof(*rp) + eir_len, GFP_KERNEL);
906 if (!rp)
907 return -ENOMEM;
908
909 rp->eir_len = cpu_to_le16(eir_len);
910 memcpy(rp->eir, buff, eir_len);
911
912 bacpy(&rp->bdaddr, &hdev->bdaddr);
913
914 rp->version = hdev->hci_ver;
915 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
916
917 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
918 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
919
920 hci_dev_unlock(hdev);
921
922 /* If this command is called at least once, then the events
923 * for class of device and local name changes are disabled
924 * and only the new extended controller information event
925 * is used.
926 */
927 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
928 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
929 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
930
931 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
932 sizeof(*rp) + eir_len);
933 }
934
935 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
936 {
937 struct mgmt_ev_ext_info_changed ev;
938
939 ev.eir_len = cpu_to_le16(0);
940
941 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, &ev,
942 sizeof(ev), HCI_MGMT_EXT_INFO_EVENTS, skip);
943 }
944
945 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
946 {
947 __le32 settings = cpu_to_le32(get_current_settings(hdev));
948
949 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
950 sizeof(settings));
951 }
952
953 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
954 {
955 BT_DBG("%s status 0x%02x", hdev->name, status);
956
957 if (hci_conn_count(hdev) == 0) {
958 cancel_delayed_work(&hdev->power_off);
959 queue_work(hdev->req_workqueue, &hdev->power_off.work);
960 }
961 }
962
963 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
964 {
965 struct mgmt_ev_advertising_added ev;
966
967 ev.instance = instance;
968
969 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
970 }
971
972 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
973 u8 instance)
974 {
975 struct mgmt_ev_advertising_removed ev;
976
977 ev.instance = instance;
978
979 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
980 }
981
982 static void cancel_adv_timeout(struct hci_dev *hdev)
983 {
984 if (hdev->adv_instance_timeout) {
985 hdev->adv_instance_timeout = 0;
986 cancel_delayed_work(&hdev->adv_instance_expire);
987 }
988 }
989
/* Build and run one HCI request that quiesces the controller before a
 * power-off: disable scanning/advertising, stop discovery and abort all
 * connections.  Returns the hci_req_run() result (-ENODATA if nothing
 * had to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page/inquiry scan if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances, then stop any active LE adv. */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1023
/* Handle MGMT_OP_SET_POWERED: power the controller up or down.  The
 * actual work is asynchronous; a pending command entry tracks the reply
 * until the power change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse overlapping power transitions. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1078
1079 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1080 {
1081 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1082
1083 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1084 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1085 }
1086
/* Public helper: broadcast the current settings of @hdev to every
 * interested mgmt socket (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1091
/* Context passed to mgmt_pending_foreach() callbacks that accumulate
 * state across pending commands (see settings_rsp below).
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket; settings_rsp takes a reference */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1097
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and consume it. The first command's socket is saved
 * in the cmd_lookup (with a reference taken) so the caller can skip it
 * when broadcasting the subsequent New Settings event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* _free, not _remove: the command was already unlinked above. */
	mgmt_pending_free(cmd);
}
1113
1114 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1115 {
1116 u8 *status = data;
1117
1118 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1119 mgmt_pending_remove(cmd);
1120 }
1121
1122 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1123 {
1124 if (cmd->cmd_complete) {
1125 u8 *status = data;
1126
1127 cmd->cmd_complete(cmd, *status);
1128 mgmt_pending_remove(cmd);
1129
1130 return;
1131 }
1132
1133 cmd_status_rsp(cmd, data);
1134 }
1135
/* Default completion handler: echo the original command parameters back
 * in the Command Complete response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1141
/* Completion handler for address-based commands: only the leading
 * mgmt_addr_info portion of the stored parameters is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1147
1148 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1149 {
1150 if (!lmp_bredr_capable(hdev))
1151 return MGMT_STATUS_NOT_SUPPORTED;
1152 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1153 return MGMT_STATUS_REJECTED;
1154 else
1155 return MGMT_STATUS_SUCCESS;
1156 }
1157
1158 static u8 mgmt_le_support(struct hci_dev *hdev)
1159 {
1160 if (!lmp_le_capable(hdev))
1161 return MGMT_STATUS_NOT_SUPPORTED;
1162 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1163 return MGMT_STATUS_REJECTED;
1164 else
1165 return MGMT_STATUS_SUCCESS;
1166 }
1167
/* Called when the HCI request issued for a Set Discoverable command has
 * completed. Responds to the pending command, and on success arms the
 * discoverable timeout (if one was configured) and broadcasts the new
 * settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag set optimistically
		 * in set_discoverable().
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout deferred from set_discoverable(). */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1202
/* Handle the MGMT Set Discoverable command.
 *
 * cp->val is 0x00 (off), 0x01 (general discoverable) or 0x02 (limited
 * discoverable); cp->timeout is an optional auto-off timeout in seconds.
 * When powered off only the settings flags are updated; when powered on
 * the flags are set here and the actual HCI work plus timeout arming is
 * deferred to the discoverable_update work item and
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable requires at least one enabled transport. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be armed while powered. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and Connectable updates share HCI state, so only
	 * one of either command may be pending at a time.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable mode requires connectable mode to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1326
/* Called when the HCI request issued for a Set Connectable command has
 * completed. Responds to the pending command and, on success,
 * broadcasts the new settings to other mgmt sockets.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1354
1355 static int set_connectable_update_settings(struct hci_dev *hdev,
1356 struct sock *sk, u8 val)
1357 {
1358 bool changed = false;
1359 int err;
1360
1361 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1362 changed = true;
1363
1364 if (val) {
1365 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1366 } else {
1367 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1368 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1369 }
1370
1371 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1372 if (err < 0)
1373 return err;
1374
1375 if (changed) {
1376 hci_req_update_scan(hdev);
1377 hci_update_background_scan(hdev);
1378 return new_settings(hdev, sk);
1379 }
1380
1381 return 0;
1382 }
1383
/* Handle the MGMT Set Connectable command.
 *
 * When powered off only the settings flags are updated (via
 * set_connectable_update_settings); when powered on the flags are
 * adjusted here and the HCI work is deferred to the connectable_update
 * work item, completed by mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Connectable requires at least one enabled transport. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and Connectable updates share HCI state, so only
	 * one of either command may be pending at a time.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also tears down discoverable
		 * mode and any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1440
/* Handle the MGMT Set Bondable command. Pure flag change; no HCI
 * traffic is required, but in limited privacy mode a discoverable
 * update is queued because the bondable state may affect the local
 * advertising address.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1483
/* Handle the MGMT Set Link Security command (BR/EDR authentication).
 *
 * When powered off only the LINK_SECURITY flag is toggled; when powered
 * on an HCI Write Auth Enable command is issued and the pending mgmt
 * command is completed from its command-complete handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: reply with current settings. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1552
/* Handle the MGMT Set Secure Simple Pairing command.
 *
 * When powered off only the SSP_ENABLED flag is toggled (disabling SSP
 * also drops High Speed support, since HS depends on SSP); when powered
 * on an HCI Write SSP Mode command is issued, preceded by disabling SSP
 * debug mode if it was in use.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also clears HS; "changed" must be
			 * true if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested mode already active: reply with current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug mode if it was active. */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1633
/* Handle the MGMT Set High Speed command. HS is a pure host-side flag
 * (no HCI traffic), but it requires SSP to be enabled, and disabling it
 * while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the SSP state underneath us. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1690
/* HCI request completion handler for Set LE. Answers all pending Set LE
 * commands (with an error status, or with the new settings followed by
 * a New Settings broadcast) and, when LE was actually enabled, refreshes
 * the advertising data and background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first socket it saw. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1730
/* Handle the MGMT Set Low Energy command.
 *
 * When powered off, or when the controller already matches the host LE
 * state, only the LE_ENABLED (and possibly ADVERTISING) flags are
 * updated. Otherwise an HCI Write LE Host Supported request is issued,
 * disabling advertising first when LE is being switched off, and the
 * command is completed from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Switching LE off removes all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* LE off implies advertising off. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set LE and Set Advertising both touch LE controller state. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1834
1835 /* This is a helper function to test for pending mgmt commands that can
1836 * cause CoD or EIR HCI commands. We can only allow one such pending
1837 * mgmt command at a time since otherwise we cannot easily track what
1838 * the current values are, will be, and based on that calculate if a new
1839 * HCI command needs to be sent and if yes with what value.
1840 */
1841 static bool pending_eir_or_class(struct hci_dev *hdev)
1842 {
1843 struct mgmt_pending_cmd *cmd;
1844
1845 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1846 switch (cmd->opcode) {
1847 case MGMT_OP_ADD_UUID:
1848 case MGMT_OP_REMOVE_UUID:
1849 case MGMT_OP_SET_DEV_CLASS:
1850 case MGMT_OP_SET_POWERED:
1851 return true;
1852 }
1853 }
1854
1855 return false;
1856 }
1857
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; get_uuid_size() compares the first 12 bytes
 * against this and reads the short-UUID part from the last 4 bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1862
1863 static u8 get_uuid_size(const u8 *uuid)
1864 {
1865 u32 val;
1866
1867 if (memcmp(uuid, bluetooth_base_uuid, 12))
1868 return 128;
1869
1870 val = get_unaligned_le32(&uuid[12]);
1871 if (val > 0xffff)
1872 return 32;
1873
1874 return 16;
1875 }
1876
/* Shared completion path for UUID/class commands: complete the pending
 * command identified by @mgmt_op with the translated HCI status and the
 * current device class as response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1895
/* HCI request completion handler for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1902
/* Handle the MGMT Add UUID command: record the UUID, then refresh the
 * class of device and EIR data via an HCI request. When no HCI commands
 * end up being queued (-ENODATA) the command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1960
1961 static bool enable_service_cache(struct hci_dev *hdev)
1962 {
1963 if (!hdev_is_powered(hdev))
1964 return false;
1965
1966 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1967 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1968 CACHE_TIMEOUT);
1969 return true;
1970 }
1971
1972 return false;
1973 }
1974
/* HCI request completion handler for Remove UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
1981
/* Handle the MGMT Remove UUID command. The all-zero wildcard UUID
 * clears the entire list (possibly just (re)arming the service cache
 * instead of touching the controller); otherwise every matching entry
 * is removed. The class of device and EIR data are then refreshed via
 * an HCI request, completing immediately on -ENODATA.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2060
/* HCI request completion handler for Set Device Class. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2067
/* Handle the MGMT Set Device Class command. Stores the new major/minor
 * class and, when powered, pushes it to the controller (flushing the
 * service cache first if it was active). Completes immediately when
 * powered off or when no HCI commands were needed (-ENODATA).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel to avoid
		 * deadlocking against the service_cache work item.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2138
/* Handle the MGMT Load Link Keys command: replace the entire stored
 * BR/EDR link key list with the supplied set. All keys are validated
 * before any existing key is discarded; debug combination keys are
 * never stored.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that expected_len below cannot
	 * exceed (and therefore cannot overflow) U16_MAX.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing keys. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2220
2221 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2222 u8 addr_type, struct sock *skip_sk)
2223 {
2224 struct mgmt_ev_device_unpaired ev;
2225
2226 bacpy(&ev.addr.bdaddr, bdaddr);
2227 ev.addr.type = addr_type;
2228
2229 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2230 skip_sk);
2231 }
2232
/* Remove all bonding material for a remote device and, if requested
 * via cp->disconnect, terminate an existing connection to it.
 *
 * BR/EDR: only the stored link key is removed. LE: the IRK and LTK are
 * removed, auto-connection is disabled, and removal of the stored
 * connection parameters is deferred until the link closes so they can
 * survive a re-pairing. Replies with MGMT_STATUS_NOT_PAIRED when no
 * key material was stored for the address.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply echoes the target address on every status path */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect must be a strict boolean (0x00 or 0x01) */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the stored parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* A disconnect is pending: defer the mgmt response until the
	 * link termination completes.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2363
2364 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2365 u16 len)
2366 {
2367 struct mgmt_cp_disconnect *cp = data;
2368 struct mgmt_rp_disconnect rp;
2369 struct mgmt_pending_cmd *cmd;
2370 struct hci_conn *conn;
2371 int err;
2372
2373 BT_DBG("");
2374
2375 memset(&rp, 0, sizeof(rp));
2376 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2377 rp.addr.type = cp->addr.type;
2378
2379 if (!bdaddr_type_is_valid(cp->addr.type))
2380 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381 MGMT_STATUS_INVALID_PARAMS,
2382 &rp, sizeof(rp));
2383
2384 hci_dev_lock(hdev);
2385
2386 if (!test_bit(HCI_UP, &hdev->flags)) {
2387 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2388 MGMT_STATUS_NOT_POWERED, &rp,
2389 sizeof(rp));
2390 goto failed;
2391 }
2392
2393 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2394 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2395 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2396 goto failed;
2397 }
2398
2399 if (cp->addr.type == BDADDR_BREDR)
2400 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2401 &cp->addr.bdaddr);
2402 else
2403 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2404 le_addr_type(cp->addr.type));
2405
2406 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2407 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2408 MGMT_STATUS_NOT_CONNECTED, &rp,
2409 sizeof(rp));
2410 goto failed;
2411 }
2412
2413 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2414 if (!cmd) {
2415 err = -ENOMEM;
2416 goto failed;
2417 }
2418
2419 cmd->cmd_complete = generic_cmd_complete;
2420
2421 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2422 if (err < 0)
2423 mgmt_pending_remove(cmd);
2424
2425 failed:
2426 hci_dev_unlock(hdev);
2427 return err;
2428 }
2429
2430 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2431 {
2432 switch (link_type) {
2433 case LE_LINK:
2434 switch (addr_type) {
2435 case ADDR_LE_DEV_PUBLIC:
2436 return BDADDR_LE_PUBLIC;
2437
2438 default:
2439 /* Fallback to LE Random address type */
2440 return BDADDR_LE_RANDOM;
2441 }
2442
2443 default:
2444 /* Fallback to BR/EDR type */
2445 return BDADDR_BREDR;
2446 }
2447 }
2448
2449 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2450 u16 data_len)
2451 {
2452 struct mgmt_rp_get_connections *rp;
2453 struct hci_conn *c;
2454 size_t rp_len;
2455 int err;
2456 u16 i;
2457
2458 BT_DBG("");
2459
2460 hci_dev_lock(hdev);
2461
2462 if (!hdev_is_powered(hdev)) {
2463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2464 MGMT_STATUS_NOT_POWERED);
2465 goto unlock;
2466 }
2467
2468 i = 0;
2469 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2470 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2471 i++;
2472 }
2473
2474 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2475 rp = kmalloc(rp_len, GFP_KERNEL);
2476 if (!rp) {
2477 err = -ENOMEM;
2478 goto unlock;
2479 }
2480
2481 i = 0;
2482 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2483 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2484 continue;
2485 bacpy(&rp->addr[i].bdaddr, &c->dst);
2486 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2487 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2488 continue;
2489 i++;
2490 }
2491
2492 rp->conn_count = cpu_to_le16(i);
2493
2494 /* Recalculate length in case of filtered SCO connections, etc */
2495 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2496
2497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2498 rp_len);
2499
2500 kfree(rp);
2501
2502 unlock:
2503 hci_dev_unlock(hdev);
2504 return err;
2505 }
2506
2507 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2508 struct mgmt_cp_pin_code_neg_reply *cp)
2509 {
2510 struct mgmt_pending_cmd *cmd;
2511 int err;
2512
2513 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2514 sizeof(*cp));
2515 if (!cmd)
2516 return -ENOMEM;
2517
2518 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2519 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2520 if (err < 0)
2521 mgmt_pending_remove(cmd);
2522
2523 return err;
2524 }
2525
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller in response to a PIN Code Request from a connected
 * BR/EDR device. The mgmt response is deferred to the pending command.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN: reject the pairing
	 * at the HCI level (negative reply) and fail the mgmt command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2587
2588 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2589 u16 len)
2590 {
2591 struct mgmt_cp_set_io_capability *cp = data;
2592
2593 BT_DBG("");
2594
2595 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2597 MGMT_STATUS_INVALID_PARAMS);
2598
2599 hci_dev_lock(hdev);
2600
2601 hdev->io_capability = cp->io_capability;
2602
2603 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2604 hdev->io_capability);
2605
2606 hci_dev_unlock(hdev);
2607
2608 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2609 NULL, 0);
2610 }
2611
2612 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2613 {
2614 struct hci_dev *hdev = conn->hdev;
2615 struct mgmt_pending_cmd *cmd;
2616
2617 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2618 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2619 continue;
2620
2621 if (cmd->user_data != conn)
2622 continue;
2623
2624 return cmd;
2625 }
2626
2627 return NULL;
2628 }
2629
/* Finish a PAIR_DEVICE request: send the mgmt command complete and
 * release the connection reference that pair_device() stored in
 * cmd->user_data.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken when the command was queued */
	hci_conn_put(conn);

	return err;
}
2658
2659 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2660 {
2661 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2662 struct mgmt_pending_cmd *cmd;
2663
2664 cmd = find_pairing(conn);
2665 if (cmd) {
2666 cmd->cmd_complete(cmd, status);
2667 mgmt_pending_remove(cmd);
2668 }
2669 }
2670
2671 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2672 {
2673 struct mgmt_pending_cmd *cmd;
2674
2675 BT_DBG("status %u", status);
2676
2677 cmd = find_pairing(conn);
2678 if (!cmd) {
2679 BT_DBG("Unable to find a pending command");
2680 return;
2681 }
2682
2683 cmd->cmd_complete(cmd, mgmt_status(status));
2684 mgmt_pending_remove(cmd);
2685 }
2686
2687 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2688 {
2689 struct mgmt_pending_cmd *cmd;
2690
2691 BT_DBG("status %u", status);
2692
2693 if (!status)
2694 return;
2695
2696 cmd = find_pairing(conn);
2697 if (!cmd) {
2698 BT_DBG("Unable to find a pending command");
2699 return;
2700 }
2701
2702 cmd->cmd_complete(cmd, mgmt_status(status));
2703 mgmt_pending_remove(cmd);
2704 }
2705
2706 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2707 u16 len)
2708 {
2709 struct mgmt_cp_pair_device *cp = data;
2710 struct mgmt_rp_pair_device rp;
2711 struct mgmt_pending_cmd *cmd;
2712 u8 sec_level, auth_type;
2713 struct hci_conn *conn;
2714 int err;
2715
2716 BT_DBG("");
2717
2718 memset(&rp, 0, sizeof(rp));
2719 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2720 rp.addr.type = cp->addr.type;
2721
2722 if (!bdaddr_type_is_valid(cp->addr.type))
2723 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2724 MGMT_STATUS_INVALID_PARAMS,
2725 &rp, sizeof(rp));
2726
2727 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2728 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2729 MGMT_STATUS_INVALID_PARAMS,
2730 &rp, sizeof(rp));
2731
2732 hci_dev_lock(hdev);
2733
2734 if (!hdev_is_powered(hdev)) {
2735 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2736 MGMT_STATUS_NOT_POWERED, &rp,
2737 sizeof(rp));
2738 goto unlock;
2739 }
2740
2741 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2742 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2743 MGMT_STATUS_ALREADY_PAIRED, &rp,
2744 sizeof(rp));
2745 goto unlock;
2746 }
2747
2748 sec_level = BT_SECURITY_MEDIUM;
2749 auth_type = HCI_AT_DEDICATED_BONDING;
2750
2751 if (cp->addr.type == BDADDR_BREDR) {
2752 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2753 auth_type);
2754 } else {
2755 u8 addr_type = le_addr_type(cp->addr.type);
2756 struct hci_conn_params *p;
2757
2758 /* When pairing a new device, it is expected to remember
2759 * this device for future connections. Adding the connection
2760 * parameter information ahead of time allows tracking
2761 * of the slave preferred values and will speed up any
2762 * further connection establishment.
2763 *
2764 * If connection parameters already exist, then they
2765 * will be kept and this function does nothing.
2766 */
2767 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2768
2769 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2770 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2771
2772 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2773 addr_type, sec_level,
2774 HCI_LE_CONN_TIMEOUT);
2775 }
2776
2777 if (IS_ERR(conn)) {
2778 int status;
2779
2780 if (PTR_ERR(conn) == -EBUSY)
2781 status = MGMT_STATUS_BUSY;
2782 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2783 status = MGMT_STATUS_NOT_SUPPORTED;
2784 else if (PTR_ERR(conn) == -ECONNREFUSED)
2785 status = MGMT_STATUS_REJECTED;
2786 else
2787 status = MGMT_STATUS_CONNECT_FAILED;
2788
2789 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2790 status, &rp, sizeof(rp));
2791 goto unlock;
2792 }
2793
2794 if (conn->connect_cfm_cb) {
2795 hci_conn_drop(conn);
2796 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2797 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2798 goto unlock;
2799 }
2800
2801 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2802 if (!cmd) {
2803 err = -ENOMEM;
2804 hci_conn_drop(conn);
2805 goto unlock;
2806 }
2807
2808 cmd->cmd_complete = pairing_complete;
2809
2810 /* For LE, just connecting isn't a proof that the pairing finished */
2811 if (cp->addr.type == BDADDR_BREDR) {
2812 conn->connect_cfm_cb = pairing_complete_cb;
2813 conn->security_cfm_cb = pairing_complete_cb;
2814 conn->disconn_cfm_cb = pairing_complete_cb;
2815 } else {
2816 conn->connect_cfm_cb = le_pairing_complete_cb;
2817 conn->security_cfm_cb = le_pairing_complete_cb;
2818 conn->disconn_cfm_cb = le_pairing_complete_cb;
2819 }
2820
2821 conn->io_capability = cp->io_cap;
2822 cmd->user_data = hci_conn_get(conn);
2823
2824 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2825 hci_conn_security(conn, sec_level, auth_type, true)) {
2826 cmd->cmd_complete(cmd, 0);
2827 mgmt_pending_remove(cmd);
2828 }
2829
2830 err = 0;
2831
2832 unlock:
2833 hci_dev_unlock(hdev);
2834 return err;
2835 }
2836
2837 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2838 u16 len)
2839 {
2840 struct mgmt_addr_info *addr = data;
2841 struct mgmt_pending_cmd *cmd;
2842 struct hci_conn *conn;
2843 int err;
2844
2845 BT_DBG("");
2846
2847 hci_dev_lock(hdev);
2848
2849 if (!hdev_is_powered(hdev)) {
2850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2851 MGMT_STATUS_NOT_POWERED);
2852 goto unlock;
2853 }
2854
2855 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2856 if (!cmd) {
2857 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2858 MGMT_STATUS_INVALID_PARAMS);
2859 goto unlock;
2860 }
2861
2862 conn = cmd->user_data;
2863
2864 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2866 MGMT_STATUS_INVALID_PARAMS);
2867 goto unlock;
2868 }
2869
2870 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
2871 mgmt_pending_remove(cmd);
2872
2873 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2874 addr, sizeof(*addr));
2875 unlock:
2876 hci_dev_unlock(hdev);
2877 return err;
2878 }
2879
/* Common handler for all user pairing responses (PIN / user confirm /
 * passkey, positive and negative).
 *
 * For LE connections the response is routed to SMP and answered
 * immediately. For BR/EDR the corresponding HCI command (hci_op) is
 * sent and the mgmt response is deferred until its completion. The
 * passkey argument is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing: hand the response to SMP and reply right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2950
2951 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2952 void *data, u16 len)
2953 {
2954 struct mgmt_cp_pin_code_neg_reply *cp = data;
2955
2956 BT_DBG("");
2957
2958 return user_pairing_resp(sk, hdev, &cp->addr,
2959 MGMT_OP_PIN_CODE_NEG_REPLY,
2960 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2961 }
2962
2963 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2964 u16 len)
2965 {
2966 struct mgmt_cp_user_confirm_reply *cp = data;
2967
2968 BT_DBG("");
2969
2970 if (len != sizeof(*cp))
2971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2972 MGMT_STATUS_INVALID_PARAMS);
2973
2974 return user_pairing_resp(sk, hdev, &cp->addr,
2975 MGMT_OP_USER_CONFIRM_REPLY,
2976 HCI_OP_USER_CONFIRM_REPLY, 0);
2977 }
2978
2979 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 void *data, u16 len)
2981 {
2982 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2983
2984 BT_DBG("");
2985
2986 return user_pairing_resp(sk, hdev, &cp->addr,
2987 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2988 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2989 }
2990
2991 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2992 u16 len)
2993 {
2994 struct mgmt_cp_user_passkey_reply *cp = data;
2995
2996 BT_DBG("");
2997
2998 return user_pairing_resp(sk, hdev, &cp->addr,
2999 MGMT_OP_USER_PASSKEY_REPLY,
3000 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3001 }
3002
3003 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3004 void *data, u16 len)
3005 {
3006 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3007
3008 BT_DBG("");
3009
3010 return user_pairing_resp(sk, hdev, &cp->addr,
3011 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3012 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3013 }
3014
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: turn the
 * HCI status into the pending mgmt response (command complete with the
 * original parameters on success, command status otherwise).
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3042
/* Handle MGMT_OP_SET_LOCAL_NAME: store the local device name and,
 * when powered, push the update to the controller (name, EIR and scan
 * response data). The response is deferred until the HCI request
 * completes, except when nothing changed or the device is off.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* Store the short name right away; no dedicated HCI command is
	 * issued for it in this function.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just store the name and notify listeners,
		 * no HCI traffic needed.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3112
/* Completion callback for HCI_OP_READ_LOCAL_OOB_DATA /
 * HCI_OP_READ_LOCAL_OOB_EXT_DATA: translate the controller reply into
 * the pending MGMT_OP_READ_LOCAL_OOB_DATA response. The legacy
 * (non-extended) command only provides the 192-bit hash/rand, so the
 * response is truncated accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 values in the legacy reply; shrink the mgmt
		 * response so the zeroed 256-bit fields are not sent.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3171
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request local out-of-band
 * pairing material from the controller. Uses the extended HCI command
 * when BR/EDR Secure Connections is enabled; the mgmt response is sent
 * from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3222
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store OOB pairing data received
 * out-of-band for a remote device.
 *
 * Two command sizes are accepted: the legacy form with only P-192
 * hash/rand (BR/EDR only) and the extended form which may additionally
 * carry P-256 values. All-zero key material disables the respective
 * OOB data set instead of storing it.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: may carry both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Any other length is malformed */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3329
3330 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3331 void *data, u16 len)
3332 {
3333 struct mgmt_cp_remove_remote_oob_data *cp = data;
3334 u8 status;
3335 int err;
3336
3337 BT_DBG("%s", hdev->name);
3338
3339 if (cp->addr.type != BDADDR_BREDR)
3340 return mgmt_cmd_complete(sk, hdev->id,
3341 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3342 MGMT_STATUS_INVALID_PARAMS,
3343 &cp->addr, sizeof(cp->addr));
3344
3345 hci_dev_lock(hdev);
3346
3347 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3348 hci_remote_oob_data_clear(hdev);
3349 status = MGMT_STATUS_SUCCESS;
3350 goto done;
3351 }
3352
3353 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3354 if (err < 0)
3355 status = MGMT_STATUS_INVALID_PARAMS;
3356 else
3357 status = MGMT_STATUS_SUCCESS;
3358
3359 done:
3360 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3361 status, &cp->addr, sizeof(cp->addr));
3362
3363 hci_dev_unlock(hdev);
3364 return err;
3365 }
3366
3367 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3368 {
3369 struct mgmt_pending_cmd *cmd;
3370
3371 BT_DBG("status %d", status);
3372
3373 hci_dev_lock(hdev);
3374
3375 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3376 if (!cmd)
3377 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3378
3379 if (!cmd)
3380 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3381
3382 if (cmd) {
3383 cmd->cmd_complete(cmd, mgmt_status(status));
3384 mgmt_pending_remove(cmd);
3385 }
3386
3387 hci_dev_unlock(hdev);
3388 }
3389
3390 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3391 uint8_t *mgmt_status)
3392 {
3393 switch (type) {
3394 case DISCOV_TYPE_LE:
3395 *mgmt_status = mgmt_le_support(hdev);
3396 if (*mgmt_status)
3397 return false;
3398 break;
3399 case DISCOV_TYPE_INTERLEAVED:
3400 *mgmt_status = mgmt_le_support(hdev);
3401 if (*mgmt_status)
3402 return false;
3403 /* Intentional fall-through */
3404 case DISCOV_TYPE_BREDR:
3405 *mgmt_status = mgmt_bredr_support(hdev);
3406 if (*mgmt_status)
3407 return false;
3408 break;
3409 default:
3410 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3411 return false;
3412 }
3413
3414 return true;
3415 }
3416
/* Shared implementation of Start Discovery and Start Limited
 * Discovery (op selects which). Validates device state and discovery
 * type, resets the discovery filter and kicks off the discovery via
 * the hdev request workqueue; the mgmt response is deferred to the
 * pending command.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be idle and periodic inquiry must be off */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual start happens asynchronously in the discov_update work */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3477
/* Handler for MGMT_OP_START_DISCOVERY; thin wrapper around the shared
 * implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3484
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY; thin wrapper around the
 * shared implementation, which sets hdev->discovery.limited for this
 * opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3492
3493 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3494 u8 status)
3495 {
3496 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
3497 cmd->param, 1);
3498 }
3499
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY. Like regular discovery
 * but with result filtering: an RSSI threshold and an optional list of
 * 128-bit UUIDs that found devices must advertise. Returns 0 or a
 * negative errno.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap on uuid_count so that expected_len below cannot exceed
	 * (or wrap around) U16_MAX; each UUID entry is 16 bytes.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or periodic inquiry runs. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload length must exactly match the declared UUID
	 * count.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list; freed later by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Hand the actual HCI work to the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3600
3601 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
3602 {
3603 struct mgmt_pending_cmd *cmd;
3604
3605 BT_DBG("status %d", status);
3606
3607 hci_dev_lock(hdev);
3608
3609 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3610 if (cmd) {
3611 cmd->cmd_complete(cmd, mgmt_status(status));
3612 mgmt_pending_remove(cmd);
3613 }
3614
3615 hci_dev_unlock(hdev);
3616 }
3617
/* Handler for MGMT_OP_STOP_DISCOVERY. Validates that a discovery of the
 * requested type is actually running, then queues the asynchronous stop
 * on the request workqueue; the pending command completes from
 * mgmt_stop_discovery_complete(). Returns 0 or a negative errno.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the discovery that was started. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3659
3660 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3661 u16 len)
3662 {
3663 struct mgmt_cp_confirm_name *cp = data;
3664 struct inquiry_entry *e;
3665 int err;
3666
3667 BT_DBG("%s", hdev->name);
3668
3669 hci_dev_lock(hdev);
3670
3671 if (!hci_discovery_active(hdev)) {
3672 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3673 MGMT_STATUS_FAILED, &cp->addr,
3674 sizeof(cp->addr));
3675 goto failed;
3676 }
3677
3678 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3679 if (!e) {
3680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3681 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3682 sizeof(cp->addr));
3683 goto failed;
3684 }
3685
3686 if (cp->name_known) {
3687 e->name_state = NAME_KNOWN;
3688 list_del(&e->list);
3689 } else {
3690 e->name_state = NAME_NEEDED;
3691 hci_inquiry_cache_update_resolve(hdev, e);
3692 }
3693
3694 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
3695 &cp->addr, sizeof(cp->addr));
3696
3697 failed:
3698 hci_dev_unlock(hdev);
3699 return err;
3700 }
3701
3702 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3703 u16 len)
3704 {
3705 struct mgmt_cp_block_device *cp = data;
3706 u8 status;
3707 int err;
3708
3709 BT_DBG("%s", hdev->name);
3710
3711 if (!bdaddr_type_is_valid(cp->addr.type))
3712 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3713 MGMT_STATUS_INVALID_PARAMS,
3714 &cp->addr, sizeof(cp->addr));
3715
3716 hci_dev_lock(hdev);
3717
3718 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3719 cp->addr.type);
3720 if (err < 0) {
3721 status = MGMT_STATUS_FAILED;
3722 goto done;
3723 }
3724
3725 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3726 sk);
3727 status = MGMT_STATUS_SUCCESS;
3728
3729 done:
3730 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3731 &cp->addr, sizeof(cp->addr));
3732
3733 hci_dev_unlock(hdev);
3734
3735 return err;
3736 }
3737
3738 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3739 u16 len)
3740 {
3741 struct mgmt_cp_unblock_device *cp = data;
3742 u8 status;
3743 int err;
3744
3745 BT_DBG("%s", hdev->name);
3746
3747 if (!bdaddr_type_is_valid(cp->addr.type))
3748 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3749 MGMT_STATUS_INVALID_PARAMS,
3750 &cp->addr, sizeof(cp->addr));
3751
3752 hci_dev_lock(hdev);
3753
3754 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3755 cp->addr.type);
3756 if (err < 0) {
3757 status = MGMT_STATUS_INVALID_PARAMS;
3758 goto done;
3759 }
3760
3761 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3762 sk);
3763 status = MGMT_STATUS_SUCCESS;
3764
3765 done:
3766 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3767 &cp->addr, sizeof(cp->addr));
3768
3769 hci_dev_unlock(hdev);
3770
3771 return err;
3772 }
3773
3774 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3775 u16 len)
3776 {
3777 struct mgmt_cp_set_device_id *cp = data;
3778 struct hci_request req;
3779 int err;
3780 __u16 source;
3781
3782 BT_DBG("%s", hdev->name);
3783
3784 source = __le16_to_cpu(cp->source);
3785
3786 if (source > 0x0002)
3787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3788 MGMT_STATUS_INVALID_PARAMS);
3789
3790 hci_dev_lock(hdev);
3791
3792 hdev->devid_source = source;
3793 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3794 hdev->devid_product = __le16_to_cpu(cp->product);
3795 hdev->devid_version = __le16_to_cpu(cp->version);
3796
3797 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
3798 NULL, 0);
3799
3800 hci_req_init(&req, hdev);
3801 __hci_req_update_eir(&req);
3802 hci_req_run(&req, NULL);
3803
3804 hci_dev_unlock(hdev);
3805
3806 return err;
3807 }
3808
/* hci_req_run() completion callback used when re-enabling instance
 * advertising; only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
3814
/* Completion handler for the Set Advertising HCI request. Syncs the
 * HCI_ADVERTISING flag with the controller state, answers all pending
 * Set Advertising commands, and — if Set Advertising was just turned
 * off while advertising instances exist — re-enables multi-instance
 * advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the mgmt
	 * setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Pick the current instance, or fall back to the first one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
3877
/* Handler for MGMT_OP_SET_ADVERTISING. cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on with the connectable flag set. Either answers directly
 * (when no HCI traffic is needed or allowed) or builds an HCI request
 * completed by set_advertising_complete(). Returns 0 or a negative
 * errno.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when something changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE command at a time. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any instance-advertising timeout before taking over. */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3980
/* Handler for MGMT_OP_SET_STATIC_ADDRESS. Stores the LE static random
 * address; only allowed while the controller is powered off. BDADDR_ANY
 * clears the address. Returns 0 or a negative errno.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the identity address while powered is rejected. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* Validate the format of a non-clearing address. */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4024
/* Handler for MGMT_OP_SET_SCAN_PARAMS. Stores the LE scan interval and
 * window (both validated against the 0x0004-0x4000 range, with window
 * <= interval) and restarts background scanning so the new values take
 * effect. Returns 0 or a negative errno.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4081
/* Completion handler for the Set Fast Connectable HCI request. On
 * success toggles HCI_FAST_CONNECTABLE according to the pending
 * command's parameter and broadcasts New Settings; on failure sends a
 * command status back to the requester.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending cmd. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4115
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. Requires a BR/EDR capable
 * controller (Bluetooth 1.2+). When powered, the page-scan change goes
 * through an HCI request completed by fast_connectable_complete();
 * when powered off only the setting flag is toggled. Returns 0 or a
 * negative errno.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just acknowledge with current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off, only the flag is toggled; the controller
	 * is configured on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4180
/* Completion handler for the Set BR/EDR HCI request. On failure the
 * HCI_BREDR_ENABLED flag (optimistically set by set_bredr()) is rolled
 * back; on success the new settings are confirmed and broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4212
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected, as is re-enabling in configurations that would be invalid
 * (static address in use, or secure connections enabled). Returns 0 or
 * a negative errno.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE stays enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just acknowledge with the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags are updated; disabling
	 * BR/EDR also clears every BR/EDR-only setting.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	/* set_bredr_complete() rolls the flag back on failure. */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4324
/* Completion handler for the Set Secure Connections HCI request.
 * Applies the requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only)
 * to the SC flags and answers the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stored with the pending cmd. */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4369
/* Handler for MGMT_OP_SET_SECURE_CONN. cp->val: 0x00 = off, 0x01 =
 * secure connections enabled, 0x02 = secure connections only mode.
 * Either toggles the flags directly (powered off, no controller SC
 * support, or BR/EDR disabled) or writes HCI_OP_WRITE_SC_SUPPORT and
 * lets sc_enable_complete() apply the result. Returns 0 or a negative
 * errno.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be on first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed: toggle the flags and respond. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just acknowledge. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4457
/* Handler for MGMT_OP_SET_DEBUG_KEYS. cp->val: 0x00 = discard debug
 * keys, 0x01 = keep debug keys, 0x02 = keep and also generate debug
 * keys (SSP debug mode). Returns 0 or a negative errno.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both keep debug keys around. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally switches to using debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate the debug-mode toggle to a powered controller
	 * with SSP enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4504
/* Handler for MGMT_OP_SET_PRIVACY. cp->privacy: 0x00 = off, 0x01 = on,
 * 0x02 = limited privacy. Stores the IRK and privacy flags; only
 * allowed while the controller is powered off. Returns 0 or a negative
 * errno.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4559
4560 static bool irk_is_valid(struct mgmt_irk_info *irk)
4561 {
4562 switch (irk->addr.type) {
4563 case BDADDR_LE_PUBLIC:
4564 return true;
4565
4566 case BDADDR_LE_RANDOM:
4567 /* Two most significant bits shall be set */
4568 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4569 return false;
4570 return true;
4571 }
4572
4573 return false;
4574 }
4575
/* Handler for MGMT_OP_LOAD_IRKS: replace the entire stored set of
 * Identity Resolving Keys with the list from user space. Returns 0 or
 * a negative errno.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Cap so expected_len below cannot exceed (or wrap) U16_MAX. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the stored keys. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4637
4638 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4639 {
4640 if (key->master != 0x00 && key->master != 0x01)
4641 return false;
4642
4643 switch (key->addr.type) {
4644 case BDADDR_LE_PUBLIC:
4645 return true;
4646
4647 case BDADDR_LE_RANDOM:
4648 /* Two most significant bits shall be set */
4649 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4650 return false;
4651 return true;
4652 }
4653
4654 return false;
4655 }
4656
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Validates the command length and every key, then replaces the SMP LTK
 * store under hdev->lock with the keys supplied by userspace, mapping
 * the mgmt key type onto the internal SMP key type and authentication
 * level.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count for which expected_len still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole command before clearing the store if any
	 * single key is malformed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here — this case falls
			 * through to "continue", so P-256 debug keys are
			 * validated but never stored (the two assignments
			 * above are effectively dead). Presumably
			 * intentional (debug keys should not persist) —
			 * confirm before "fixing" by adding a break, as
			 * that would start loading debug keys.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4743
4744 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
4745 {
4746 struct hci_conn *conn = cmd->user_data;
4747 struct mgmt_rp_get_conn_info rp;
4748 int err;
4749
4750 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
4751
4752 if (status == MGMT_STATUS_SUCCESS) {
4753 rp.rssi = conn->rssi;
4754 rp.tx_power = conn->tx_power;
4755 rp.max_tx_power = conn->max_tx_power;
4756 } else {
4757 rp.rssi = HCI_RSSI_INVALID;
4758 rp.tx_power = HCI_TX_POWER_INVALID;
4759 rp.max_tx_power = HCI_TX_POWER_INVALID;
4760 }
4761
4762 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4763 status, &rp, sizeof(rp));
4764
4765 hci_conn_drop(conn);
4766 hci_conn_put(conn);
4767
4768 return err;
4769 }
4770
/* HCI request callback for the Read RSSI / Read TX Power request issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command, finds the matching pending mgmt command and completes it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command found in the sent queue: nothing to complete */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command was stored keyed on the hci_conn pointer */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete(): replies and drops the conn refs */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4823
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power information for an active connection. If
 * the cached values are fresh enough they are returned immediately;
 * otherwise an HCI request (Read RSSI, plus Read TX Power where still
 * needed) is issued and the reply is deferred to
 * conn_info_refresh_complete() via a pending command.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the queried address back in every reply, success or not */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info may be in flight per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the request completes;
		 * the refs are dropped in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4944
/* Complete a pending Get Clock Information command.
 *
 * On success fills in the local clock and, when the query targeted a
 * specific connection, the piconet clock and accuracy. Always sends a
 * reply (zeroed values on failure) and drops the connection references
 * taken in get_clock_info(), if any.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	/* Re-acquire the device by index; it may have gone away since the
	 * command was queued, in which case local_clock stays zero.
	 */
	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold/get pair taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
4980
4981 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4982 {
4983 struct hci_cp_read_clock *hci_cp;
4984 struct mgmt_pending_cmd *cmd;
4985 struct hci_conn *conn;
4986
4987 BT_DBG("%s status %u", hdev->name, status);
4988
4989 hci_dev_lock(hdev);
4990
4991 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
4992 if (!hci_cp)
4993 goto unlock;
4994
4995 if (hci_cp->which) {
4996 u16 handle = __le16_to_cpu(hci_cp->handle);
4997 conn = hci_conn_hash_lookup_handle(hdev, handle);
4998 } else {
4999 conn = NULL;
5000 }
5001
5002 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5003 if (!cmd)
5004 goto unlock;
5005
5006 cmd->cmd_complete(cmd, mgmt_status(status));
5007 mgmt_pending_remove(cmd);
5008
5009 unlock:
5010 hci_dev_unlock(hdev);
5011 }
5012
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Queries the local Bluetooth clock and, when a non-ANY BR/EDR address
 * is given, the piconet clock of the corresponding connection. The
 * actual values arrive asynchronously via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Echo the queried address back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection whose piconet
	 * clock should be read in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Refs are dropped in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5088
5089 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5090 {
5091 struct hci_conn *conn;
5092
5093 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5094 if (!conn)
5095 return false;
5096
5097 if (conn->dst_type != type)
5098 return false;
5099
5100 if (conn->state != BT_CONNECTED)
5101 return false;
5102
5103 return true;
5104 }
5105
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for an LE device and set its
 * auto-connect policy, moving the entry onto the matching pend_le list.
 * Returns 0 on success, -EIO if the params entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pend list the entry is currently on
	 * before re-filing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Already connected devices don't need a pending entry */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5150
5151 static void device_added(struct sock *sk, struct hci_dev *hdev,
5152 bdaddr_t *bdaddr, u8 type, u8 action)
5153 {
5154 struct mgmt_ev_device_added ev;
5155
5156 bacpy(&ev.addr.bdaddr, bdaddr);
5157 ev.addr.type = type;
5158 ev.action = action;
5159
5160 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5161 }
5162
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * Adds a device to the kernel's connect/whitelist machinery. For BR/EDR
 * addresses only action 0x01 (allow incoming connections, via the
 * controller whitelist) is supported; LE addresses get connection
 * parameters with the auto-connect policy mapped from cp->action.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming / direct,
	 * 0x02 = auto-connect
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Page scan settings may need updating for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5250
5251 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5252 bdaddr_t *bdaddr, u8 type)
5253 {
5254 struct mgmt_ev_device_removed ev;
5255
5256 bacpy(&ev.addr.bdaddr, bdaddr);
5257 ev.addr.type = type;
5258
5259 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5260 }
5261
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * Removes one device (non-ANY address: drop it from the BR/EDR
 * whitelist or delete its LE connection parameters), or — when the
 * address is BDADDR_ANY with type 0 — clears the whole whitelist and
 * every non-disabled LE connection parameter entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Whitelist shrank; page scan may need updating */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries are internal state that was
		 * never added through Add Device, so they can't be removed
		 * through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: wholesale removal of all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Type must be 0 for the wildcard form */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept
			 * but downgraded so the connect can finish.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5390
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Replaces stored LE connection parameters with the list from
 * userspace: clears disabled entries, then adds each valid parameter
 * set. Individual malformed entries are logged and skipped rather than
 * failing the whole command; only a bad overall length is rejected.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count for which expected_len still fits in u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must match the advertised count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range/consistency checks per the LE connection
		 * parameter constraints.
		 */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5476
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Marks whether the controller's configuration is provided externally.
 * Only valid while powered off and on controllers with the
 * EXTERNAL_CONFIG quirk. Toggling the flag may move the device between
 * the configured and unconfigured index lists and trigger power-on.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* Strict boolean parameter */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured-state of the controller no longer matches
	 * which index list it is on, migrate it: either kick off the
	 * power-on sequence for a newly configured device, or re-register
	 * it as a raw, unconfigured index.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5532
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores a public address to be programmed into the controller at
 * power-on via hdev->set_bdaddr. Only valid while powered off, with a
 * non-ANY address, on drivers that implement set_bdaddr. If the device
 * thereby becomes fully configured, the power-on sequence is started.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a hook to program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Address was the missing piece: promote the device from the
	 * unconfigured list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5584
/* HCI request callback for Read Local OOB (Extended) Data issued by
 * read_local_ssp_oob_req().
 *
 * Parses either the legacy (P-192 only) or the extended (P-192 + P-256)
 * controller response, builds the EIR-formatted reply for the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command, and additionally broadcasts
 * the fresh OOB data to sockets that subscribed to OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response: only P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device EIR + two 16-byte values with
			 * their 2-byte EIR headers
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended response: P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode the legacy P-192 values must
			 * not be exposed.
			 */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	/* eir_len here is the upper bound; the appends below recompute
	 * the actual length.
	 */
	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
5695
5696 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5697 struct mgmt_cp_read_local_oob_ext_data *cp)
5698 {
5699 struct mgmt_pending_cmd *cmd;
5700 struct hci_request req;
5701 int err;
5702
5703 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5704 cp, sizeof(*cp));
5705 if (!cmd)
5706 return -ENOMEM;
5707
5708 hci_req_init(&req, hdev);
5709
5710 if (bredr_sc_enabled(hdev))
5711 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5712 else
5713 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5714
5715 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
5716 if (err < 0) {
5717 mgmt_pending_remove(cmd);
5718 return err;
5719 }
5720
5721 return 0;
5722 }
5723
/* Read Local OOB Extended Data command handler.
 *
 * Builds EIR-formatted out-of-band data for either BR/EDR or LE
 * (depending on cp->type) and replies with it. For BR/EDR with SSP
 * enabled the hash/randomizer values must be fetched from the
 * controller, so the reply is deferred to the request callback and
 * this function returns after queueing the HCI request.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: decide on a status and an upper bound for the
	 * amount of EIR data the reply can carry, so the reply buffer
	 * can be sized before taking the device lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* class of device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + SC confirm + SC random +
				 * flags fields (each incl. length/type bytes)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually fill the EIR data; eir_len now tracks
	 * the real (possibly smaller) amount appended.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Defer to the HCI request; the callback sends the
			 * reply, so only clean up locally on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (addr[6] = 0x01) when forced, when no public address
		 * exists, or when BR/EDR is disabled and a static
		 * address is configured; otherwise the public address
		 * (addr[6] = 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 while advertising (peripheral preferred),
		 * 0x01 otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt this socket in for future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the fresh OOB data to all interested sockets except
	 * the requester.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
5879
5880 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5881 {
5882 u32 flags = 0;
5883
5884 flags |= MGMT_ADV_FLAG_CONNECTABLE;
5885 flags |= MGMT_ADV_FLAG_DISCOV;
5886 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5887 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5888
5889 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5890 flags |= MGMT_ADV_FLAG_TX_POWER;
5891
5892 return flags;
5893 }
5894
/* Read Advertising Features command handler: reports the supported
 * flag bits, data size limits and the identifiers of all currently
 * configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per instance for its identifier; the count
	 * and list are read under the lock so they stay consistent.
	 */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Append the identifier of every configured instance. */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
5943
5944 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
5945 u8 len, bool is_adv_data)
5946 {
5947 u8 max_len = HCI_MAX_AD_LENGTH;
5948 int i, cur_len;
5949 bool flags_managed = false;
5950 bool tx_power_managed = false;
5951
5952 if (is_adv_data) {
5953 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
5954 MGMT_ADV_FLAG_LIMITED_DISCOV |
5955 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
5956 flags_managed = true;
5957 max_len -= 3;
5958 }
5959
5960 if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
5961 tx_power_managed = true;
5962 max_len -= 3;
5963 }
5964 }
5965
5966 if (len > max_len)
5967 return false;
5968
5969 /* Make sure that the data is correctly formatted. */
5970 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
5971 cur_len = data[i];
5972
5973 if (flags_managed && data[i + 1] == EIR_FLAGS)
5974 return false;
5975
5976 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
5977 return false;
5978
5979 /* If the current field length would exceed the total data
5980 * length, then it's invalid.
5981 */
5982 if (i + cur_len >= len)
5983 return false;
5984 }
5985
5986 return true;
5987 }
5988
/* hci_req completion callback for Add Advertising: commit or purge
 * every still-pending instance depending on the HCI status, then
 * answer the originating mgmt command if one is outstanding.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		/* Success: the pending instance is now committed. */
		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before removing the instance it
		 * refers to.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6040
/* Add Advertising command handler: validates the requested instance,
 * flags and TLV payloads, stores the instance and, when the controller
 * state allows it, schedules the instance for actual advertising.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the two declared lengths
	 * exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Serialize against other commands touching the advertising
	 * state.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6181
/* hci_req completion callback for Remove Advertising: answer the
 * pending mgmt command, always with success (see comment below).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6211
/* Remove Advertising command handler: removes one instance (or all of
 * them when cp->instance is 0) and disables advertising when no
 * instance remains.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance 0 means "all instances"; any other value must refer
	 * to an existing instance.
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Serialize against other commands touching the advertising
	 * state.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing configured, nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6283
6284 static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6285 {
6286 u8 max_len = HCI_MAX_AD_LENGTH;
6287
6288 if (is_adv_data) {
6289 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6290 MGMT_ADV_FLAG_LIMITED_DISCOV |
6291 MGMT_ADV_FLAG_MANAGED_FLAGS))
6292 max_len -= 3;
6293
6294 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6295 max_len -= 3;
6296 }
6297
6298 return max_len;
6299 }
6300
6301 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6302 void *data, u16 data_len)
6303 {
6304 struct mgmt_cp_get_adv_size_info *cp = data;
6305 struct mgmt_rp_get_adv_size_info rp;
6306 u32 flags, supported_flags;
6307 int err;
6308
6309 BT_DBG("%s", hdev->name);
6310
6311 if (!lmp_le_capable(hdev))
6312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6313 MGMT_STATUS_REJECTED);
6314
6315 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6317 MGMT_STATUS_INVALID_PARAMS);
6318
6319 flags = __le32_to_cpu(cp->flags);
6320
6321 /* The current implementation only supports a subset of the specified
6322 * flags.
6323 */
6324 supported_flags = get_supported_adv_flags(hdev);
6325 if (flags & ~supported_flags)
6326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6327 MGMT_STATUS_INVALID_PARAMS);
6328
6329 rp.instance = cp->instance;
6330 rp.flags = cp->flags;
6331 rp.max_adv_data_len = tlv_data_max_len(flags, true);
6332 rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
6333
6334 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6335 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6336
6337 return err;
6338 }
6339
/* Dispatch table for mgmt commands, indexed by mgmt opcode — entries
 * must stay in opcode order. Each entry lists the handler, the
 * expected parameter size and HCI_MGMT_* flags (variable-length
 * parameters, no controller index, untrusted access, valid while
 * unconfigured); the flags are enforced by the mgmt socket code, not
 * by the handlers themselves.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
};
6432
/* Announce a newly registered controller: a legacy Index Added (or
 * Unconfigured Index Added) event for primary controllers, and an
 * Extended Index Added event for every supported device type.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are not exposed through mgmt. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6464
/* Counterpart to mgmt_index_added(): fail all pending commands with
 * Invalid Index and emit the matching Index Removed events.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 appears to act as a wildcard matching every
		 * pending command — confirm in mgmt_util.
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured primary */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured primary */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6499
6500 /* This function requires the caller holds hdev->lock */
6501 static void restart_le_actions(struct hci_dev *hdev)
6502 {
6503 struct hci_conn_params *p;
6504
6505 list_for_each_entry(p, &hdev->le_conn_params, list) {
6506 /* Needed for AUTO_OFF case where might not "really"
6507 * have been powered off.
6508 */
6509 list_del_init(&p->action);
6510
6511 switch (p->auto_connect) {
6512 case HCI_AUTO_CONN_DIRECT:
6513 case HCI_AUTO_CONN_ALWAYS:
6514 list_add(&p->action, &hdev->pend_le_conns);
6515 break;
6516 case HCI_AUTO_CONN_REPORT:
6517 list_add(&p->action, &hdev->pend_le_reports);
6518 break;
6519 default:
6520 break;
6521 }
6522 }
6523 }
6524
/* Power-on completion: on success, re-arm stored LE auto-connect
 * actions and the background scan, then answer any pending Set Powered
 * commands and broadcast New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* settings_rsp stores one responding socket in match.sk —
	 * presumably so the New Settings broadcast can skip it; confirm
	 * against new_settings()/settings_rsp.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
6547
/* Power-off handling: fail all pending commands, announce a cleared
 * class of device if one was set, and broadcast New Settings. The
 * double-underscore prefix suggests the caller already holds the
 * required lock(s) — confirm at the call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change when it actually changes. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6581
6582 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6583 {
6584 struct mgmt_pending_cmd *cmd;
6585 u8 status;
6586
6587 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6588 if (!cmd)
6589 return;
6590
6591 if (err == -ERFKILL)
6592 status = MGMT_STATUS_RFKILLED;
6593 else
6594 status = MGMT_STATUS_FAILED;
6595
6596 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6597
6598 mgmt_pending_remove(cmd);
6599 }
6600
/* Emit a New Link Key event so userspace can (optionally) persist a
 * freshly created BR/EDR link key; store_hint tells it whether the
 * key should survive a restart.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6617
6618 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6619 {
6620 switch (ltk->type) {
6621 case SMP_LTK:
6622 case SMP_LTK_SLAVE:
6623 if (ltk->authenticated)
6624 return MGMT_LTK_AUTHENTICATED;
6625 return MGMT_LTK_UNAUTHENTICATED;
6626 case SMP_LTK_P256:
6627 if (ltk->authenticated)
6628 return MGMT_LTK_P256_AUTH;
6629 return MGMT_LTK_P256_UNAUTH;
6630 case SMP_LTK_P256_DEBUG:
6631 return MGMT_LTK_P256_DEBUG;
6632 }
6633
6634 return MGMT_LTK_UNAUTHENTICATED;
6635 }
6636
/* Emit a New Long Term Key event for a freshly distributed LTK,
 * telling userspace whether persisting it is worthwhile.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the SMP_LTK (master role) variant sets the master flag. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6679
/* Emit a New IRK event, pairing the peer's identity resolving key with
 * the resolvable private address it was seen under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6695
6696 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6697 bool persistent)
6698 {
6699 struct mgmt_ev_new_csrk ev;
6700
6701 memset(&ev, 0, sizeof(ev));
6702
6703 /* Devices using resolvable or non-resolvable random addresses
6704 * without providing an identity resolving key don't require
6705 * to store signature resolving keys. Their addresses will change
6706 * the next time around.
6707 *
6708 * Only when a remote device provides an identity address
6709 * make sure the signature resolving key is stored. So allow
6710 * static random and public addresses here.
6711 */
6712 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6713 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6714 ev.store_hint = 0x00;
6715 else
6716 ev.store_hint = persistent;
6717
6718 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6719 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6720 ev.key.type = csrk->type;
6721 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6722
6723 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6724 }
6725
/* Broadcast a New Connection Parameter event; peers without an
 * identity address (resolvable/non-resolvable private) are skipped
 * since their parameters cannot be associated with a stable address.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Multi-byte parameters go out little endian on the wire. */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
6746
/* Emit a Device Connected event. The 512-byte stack buffer holds the
 * fixed event header plus a variable EIR blob (either the cached LE
 * advertising data, or name/class fields for BR/EDR) — assumed large
 * enough for the maximum possible payload; confirm against the callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* An all-zero class of device is treated as "unknown"
		 * and not reported.
		 */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6783
6784 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6785 {
6786 struct sock **sk = data;
6787
6788 cmd->cmd_complete(cmd, 0);
6789
6790 *sk = cmd->sk;
6791 sock_hold(*sk);
6792
6793 mgmt_pending_remove(cmd);
6794 }
6795
6796 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6797 {
6798 struct hci_dev *hdev = data;
6799 struct mgmt_cp_unpair_device *cp = cmd->param;
6800
6801 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6802
6803 cmd->cmd_complete(cmd, 0);
6804 mgmt_pending_remove(cmd);
6805 }
6806
6807 bool mgmt_powering_down(struct hci_dev *hdev)
6808 {
6809 struct mgmt_pending_cmd *cmd;
6810 struct mgmt_mode *cp;
6811
6812 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6813 if (!cmd)
6814 return false;
6815
6816 cp = cmd->param;
6817 if (!cp->val)
6818 return true;
6819
6820 return false;
6821 }
6822
/* Handle a device disconnection: possibly expedite a pending power
 * off, answer any pending Disconnect command and broadcast a Device
 * Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Connections never announced via Device Connected get no
	 * disconnect event either.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete a pending Disconnect command first; its socket is
	 * handed to mgmt_event below — presumably so the broadcast
	 * skips the requester; confirm against mgmt_event().
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6858
6859 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6860 u8 link_type, u8 addr_type, u8 status)
6861 {
6862 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6863 struct mgmt_cp_disconnect *cp;
6864 struct mgmt_pending_cmd *cmd;
6865
6866 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6867 hdev);
6868
6869 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
6870 if (!cmd)
6871 return;
6872
6873 cp = cmd->param;
6874
6875 if (bacmp(bdaddr, &cp->addr.bdaddr))
6876 return;
6877
6878 if (cp->addr.type != bdaddr_type)
6879 return;
6880
6881 cmd->cmd_complete(cmd, mgmt_status(status));
6882 mgmt_pending_remove(cmd);
6883 }
6884
/* Emit the Connect Failed management event for @bdaddr with the
 * mgmt-translated HCI @status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6904
6905 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6906 {
6907 struct mgmt_ev_pin_code_request ev;
6908
6909 bacpy(&ev.addr.bdaddr, bdaddr);
6910 ev.addr.type = BDADDR_BREDR;
6911 ev.secure = secure;
6912
6913 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6914 }
6915
6916 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6917 u8 status)
6918 {
6919 struct mgmt_pending_cmd *cmd;
6920
6921 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6922 if (!cmd)
6923 return;
6924
6925 cmd->cmd_complete(cmd, mgmt_status(status));
6926 mgmt_pending_remove(cmd);
6927 }
6928
6929 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6930 u8 status)
6931 {
6932 struct mgmt_pending_cmd *cmd;
6933
6934 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6935 if (!cmd)
6936 return;
6937
6938 cmd->cmd_complete(cmd, mgmt_status(status));
6939 mgmt_pending_remove(cmd);
6940 }
6941
6942 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6943 u8 link_type, u8 addr_type, u32 value,
6944 u8 confirm_hint)
6945 {
6946 struct mgmt_ev_user_confirm_request ev;
6947
6948 BT_DBG("%s", hdev->name);
6949
6950 bacpy(&ev.addr.bdaddr, bdaddr);
6951 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6952 ev.confirm_hint = confirm_hint;
6953 ev.value = cpu_to_le32(value);
6954
6955 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6956 NULL);
6957 }
6958
6959 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6960 u8 link_type, u8 addr_type)
6961 {
6962 struct mgmt_ev_user_passkey_request ev;
6963
6964 BT_DBG("%s", hdev->name);
6965
6966 bacpy(&ev.addr.bdaddr, bdaddr);
6967 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6968
6969 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6970 NULL);
6971 }
6972
6973 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6974 u8 link_type, u8 addr_type, u8 status,
6975 u8 opcode)
6976 {
6977 struct mgmt_pending_cmd *cmd;
6978
6979 cmd = pending_find(opcode, hdev);
6980 if (!cmd)
6981 return -ENOENT;
6982
6983 cmd->cmd_complete(cmd, mgmt_status(status));
6984 mgmt_pending_remove(cmd);
6985
6986 return 0;
6987 }
6988
/* Complete a pending User Confirm Reply command with @status */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6995
/* Complete a pending User Confirm Negative Reply command with @status */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7003
/* Complete a pending User Passkey Reply command with @status */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7010
/* Complete a pending User Passkey Negative Reply command with @status */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7018
7019 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7020 u8 link_type, u8 addr_type, u32 passkey,
7021 u8 entered)
7022 {
7023 struct mgmt_ev_passkey_notify ev;
7024
7025 BT_DBG("%s", hdev->name);
7026
7027 bacpy(&ev.addr.bdaddr, bdaddr);
7028 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7029 ev.passkey = __cpu_to_le32(passkey);
7030 ev.entered = entered;
7031
7032 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7033 }
7034
/* Emit the Auth Failed event for @conn and, if a pairing command is
 * pending for it, complete that command with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing originator's socket is excluded from the event;
	 * it learns of the failure through its command completion below.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7055
/* Handle completion of an authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and
 * respond to all pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail every pending command with the
		 * translated status and change no flags.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed is true only when the flag actually flipped, i.e. the
	 * old flag value differed from the controller's new HCI_AUTH state.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	/* settings_rsp also records the first command's socket in match.sk
	 * so the New Settings event can skip that socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7082
/* Queue a Write Extended Inquiry Response command that clears the
 * controller's EIR data, and clear the cached copy in hdev->eir.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	/* Nothing to do for controllers without EIR support */
	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	/* An all-zero command parameter block clears the EIR data */
	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7097
/* Handle completion of a Secure Simple Pairing mode change: update the
 * HCI_SSP_ENABLED / HCI_HS_ENABLED flags, answer pending Set SSP
 * commands, and refresh (or clear) the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: if the flag was optimistically set,
		 * roll it back (HS depends on SSP, so clear that too)
		 * and announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; report a change
		 * if either flag actually flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* With SSP on, (re)write the EIR data — optionally enabling SSP
	 * debug mode first; with SSP off, clear the EIR data entirely.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7150
7151 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7152 {
7153 struct cmd_lookup *match = data;
7154
7155 if (match->sk == NULL) {
7156 match->sk = cmd->sk;
7157 sock_hold(match->sk);
7158 }
7159 }
7160
/* Handle completion of a Class of Device update: on success, emit the
 * Class Of Dev Changed event (skipping the socket of whichever pending
 * command triggered the change).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have triggered the CoD write */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* dev_class is the 3-byte Class of Device value */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7179
/* Handle completion of a local name update: emit the Local Name Changed
 * event unless the write was part of powering the controller on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command: the change came from
		 * HCI directly, so sync the cached name here. (With a
		 * pending command the cache is handled by its completion.)
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the command originator's socket; it gets a command response */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7207
7208 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7209 {
7210 int i;
7211
7212 for (i = 0; i < uuid_count; i++) {
7213 if (!memcmp(uuid, uuids[i], 16))
7214 return true;
7215 }
7216
7217 return false;
7218 }
7219
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return true
 * if any advertised service UUID — 16-bit, 32-bit or 128-bit — matches
 * an entry in @uuids. Shorter UUIDs are expanded into full 128-bit
 * values by overlaying them onto the Bluetooth base UUID before
 * comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		/* Each EIR field is: length octet, type octet, data.
		 * field_len covers the type octet plus the data.
		 */
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length octet terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than read past the end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs, little-endian, overlaid on bytes
			 * 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs, little-endian, overlaid on bytes
			 * 12-15 of the base UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, used as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + field_len) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7274
/* Schedule a restart of the LE scan (used with controllers whose strict
 * duplicate filtering would otherwise suppress updated results).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan window would already have
	 * expired by the time the restart delay elapses.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7289
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7334
/* Report a device found during discovery (or passive scanning) to
 * userspace via the Device Found event, applying the active discovery
 * filters and assembling EIR + optional Class of Device + scan response
 * data into one event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the CoD middle byte */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: limited flag in the advertised Flags field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only if the EIR data doesn't already
	 * carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7416
7417 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7418 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7419 {
7420 struct mgmt_ev_device_found *ev;
7421 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7422 u16 eir_len;
7423
7424 ev = (struct mgmt_ev_device_found *) buf;
7425
7426 memset(buf, 0, sizeof(buf));
7427
7428 bacpy(&ev->addr.bdaddr, bdaddr);
7429 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7430 ev->rssi = rssi;
7431
7432 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7433 name_len);
7434
7435 ev->eir_len = cpu_to_le16(eir_len);
7436
7437 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7438 }
7439
7440 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7441 {
7442 struct mgmt_ev_discovering ev;
7443
7444 BT_DBG("%s discovering %u", hdev->name, discovering);
7445
7446 memset(&ev, 0, sizeof(ev));
7447 ev.type = hdev->discovery.type;
7448 ev.discovering = discovering;
7449
7450 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7451 }
7452
/* The management channel: command handler table registered on the HCI
 * control channel.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
};
7459
/* Register the management channel; returns 0 or a negative error code */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7464
/* Unregister the management channel on module exit */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
This page took 0.277963 seconds and 5 git commands to generate.