95473e96670369359b78002264ed92d045972206
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "hci_request.h"
36 #include "smp.h"
37
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
40
/* Commands this mgmt implementation supports; reported verbatim (in this
 * order) in the MGMT_OP_READ_COMMANDS reply, so the order is ABI-visible.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
99
/* Events this mgmt implementation may emit; reported verbatim (in this
 * order) in the MGMT_OP_READ_COMMANDS reply after the command list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
131
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
133
/* A mgmt command that has been accepted from a socket but whose HCI
 * side effects have not completed yet; lives on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;		/* link in hdev->mgmt_pending */
	u16 opcode;			/* MGMT_OP_* that created this entry */
	int index;			/* controller index (hdev->id) */
	void *param;			/* copy of the command parameters */
	size_t param_len;		/* length of @param in bytes */
	struct sock *sk;		/* originating socket (holds a ref) */
	void *user_data;		/* per-opcode context, e.g. a hci_conn */
	void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
					/* completion callback, may be NULL */
};
144
/* HCI to MGMT error code conversion table, indexed by the HCI core
 * status byte; entries past the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
209
210 static u8 mgmt_status(u8 hci_status)
211 {
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
214
215 return MGMT_STATUS_FAILED;
216 }
217
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 *
 * @event:    MGMT_EV_* opcode.
 * @hdev:     controller the event relates to, or NULL for a global event
 *            (index is then MGMT_INDEX_NONE).
 * @data:     optional event payload (copied), may be NULL.
 * @data_len: payload length in bytes.
 * @skip_sk:  socket to exclude (typically the command originator).
 *
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	/* hci_send_to_control() clones the skb per receiver; drop our ref */
	kfree_skb(skb);

	return 0;
}
247
/* Queue a MGMT_EV_CMD_STATUS event on @sk reporting @status for command
 * @cmd on controller @index. Used for error replies that carry no data.
 *
 * Returns 0 on success or a negative errno (the skb is freed on failure).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
277
/* Queue a MGMT_EV_CMD_COMPLETE event on @sk for command @cmd on
 * controller @index, with optional response payload @rp of @rp_len bytes.
 *
 * Returns 0 on success or a negative errno (the skb is freed on failure).
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* The response payload lives directly after the fixed event header */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
311
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
313 u16 data_len)
314 {
315 struct mgmt_rp_read_version rp;
316
317 BT_DBG("sock %p", sk);
318
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
321
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
323 sizeof(rp));
324 }
325
/* MGMT_OP_READ_COMMANDS handler: report all supported commands followed
 * by all supported events, each as a little-endian u16.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcode array in the reply may be unaligned, hence
	 * put_unaligned_le16() rather than plain assignment.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	/* Events continue directly after the commands; @opcode carries over */
	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
359
/* MGMT_OP_READ_INDEX_LIST handler: report the indexes of all configured
 * BR/EDR controllers.
 *
 * Two passes over hci_dev_list under the read lock: the first sizes the
 * allocation (it applies fewer filters, so it may over-count; the second
 * pass can only shrink the final count), the second fills in the indexes
 * and recomputes rp_len from the real count.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we hold hci_dev_list_lock and must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
419
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: report the indexes of all
 * BR/EDR controllers still marked HCI_UNCONFIGURED. Mirrors
 * read_index_list() with the unconfigured test inverted; see there for
 * the two-pass/locking rationale.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we hold hci_dev_list_lock and must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
479
480 static bool is_configured(struct hci_dev *hdev)
481 {
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
484 return false;
485
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
488 return false;
489
490 return true;
491 }
492
/* Return (little-endian) the MGMT_OPTION_* bits that must still be
 * provided by user space before this controller counts as configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet done */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Controller has no valid public address programmed yet */
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
507
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing
 * options to all control sockets except @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}
515
/* Reply to @opcode on @sk with a command-complete carrying the
 * currently missing configuration options for @hdev.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
523
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus the
 * configuration options this controller supports and those still
 * missing.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
551
/* Compute the MGMT_SETTING_* bits this controller can support at all,
 * based on its LMP features, HCI version, quirks and driver callbacks
 * (as opposed to get_current_settings(), which reports live state).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs page scan types from 1.2 onwards */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC allows testing SC on non-SC controllers */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
591
/* Compute the MGMT_SETTING_* bits that are currently active, mirroring
 * the corresponding hdev->dev_flags bits one-to-one.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
640
641 #define PNP_INFO_SVCLASS_ID 0x1200
642
/* Append an EIR field listing the device's 16-bit service UUIDs to
 * @data, writing at most @len bytes. Starts as EIR_UUID16_ALL and is
 * downgraded to EIR_UUID16_SOME if the buffer fills up. PnP and
 * sub-0x1100 UUIDs are not advertised.
 *
 * Returns a pointer just past the last byte written (== @data if
 * nothing was emitted).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 2-byte UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Field header: length (grown below) and type */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
684
/* Append an EIR field listing the device's 32-bit service UUIDs to
 * @data, writing at most @len bytes; same ALL->SOME downgrade scheme as
 * create_uuid16_list().
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 4-byte UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives at bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
717
/* Append an EIR field listing the device's 128-bit service UUIDs to
 * @data, writing at most @len bytes; same ALL->SOME downgrade scheme as
 * create_uuid16_list().
 *
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
750
751 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 {
753 struct pending_cmd *cmd;
754
755 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
756 if (cmd->opcode == opcode)
757 return cmd;
758 }
759
760 return NULL;
761 }
762
763 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
764 struct hci_dev *hdev,
765 const void *data)
766 {
767 struct pending_cmd *cmd;
768
769 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
770 if (cmd->user_data != data)
771 continue;
772 if (cmd->opcode == opcode)
773 return cmd;
774 }
775
776 return NULL;
777 }
778
/* Build LE scan response data into @ptr (currently just the local name,
 * shortened if it does not fit). The caller must provide a buffer of at
 * least HCI_MAX_AD_LENGTH bytes.
 *
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field length and type octets */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type octet plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
804
/* Queue an HCI command updating the LE scan response data, but only if
 * LE is enabled and the data actually changed (the controller cache in
 * hdev->scan_rsp_data is compared first to avoid redundant commands).
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
829
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) that should currently be advertised.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited discoverable */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
853
/* Build LE advertising data into @ptr: a flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, if known, the
 * advertising TX power.
 *
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field if any flag is actually set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
885
/* Queue an HCI command updating the LE advertising data, but only if LE
 * is enabled and the data actually changed (mirrors
 * update_scan_rsp_data()).
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
910
/* Public entry point: refresh the LE advertising data in its own HCI
 * request. Returns the result of hci_req_run().
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
920
/* Build the BR/EDR extended inquiry response into @data: local name,
 * inquiry TX power, Device ID record, then the 16/32/128-bit UUID
 * lists, each capped by the remaining HCI_MAX_EIR_LENGTH space.
 * The caller must supply a zeroed HCI_MAX_EIR_LENGTH buffer.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		/* Names longer than 48 bytes are truncated and flagged as
		 * shortened (48 appears to be a fixed cap here, smaller
		 * than the name field itself — NOTE(review): confirm
		 * intent against HCI_MAX_NAME_LENGTH).
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record (source, vendor, product, version) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
968
/* Queue an HCI Write Extended Inquiry Response command if the EIR can
 * and should change: the controller must be powered, support extended
 * inquiry, have SSP enabled, not be in the service-cache window, and
 * the newly built EIR must differ from the cached copy.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, EIR updates are deferred
	 * until service_cache_off() runs.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
997
998 static u8 get_service_classes(struct hci_dev *hdev)
999 {
1000 struct bt_uuid *uuid;
1001 u8 val = 0;
1002
1003 list_for_each_entry(uuid, &hdev->uuids, list)
1004 val |= uuid->svc_hint;
1005
1006 return val;
1007 }
1008
/* Queue an HCI Write Class of Device command if the class can and
 * should change: powered, BR/EDR enabled, service cache inactive, and
 * the recomputed class differs from the cached one.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Deferred until service_cache_off() while the cache is active */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the major class byte */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1037
/* Return the connectable state the controller is heading towards: the
 * value of a pending SET_CONNECTABLE command if one exists, otherwise
 * the current HCI_CONNECTABLE flag.
 */
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}
1053
/* Queue an HCI command turning LE advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1060
/* Queue the HCI commands to (re)start LE advertising: disable any
 * running instance, pick the own-address type (possibly generating a
 * new random address), program the advertising parameters, then enable.
 * Bails out if an LE connection exists or the address update fails.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Advertising is not restarted while an LE link is up */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Parameters cannot be changed while advertising is active */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1101
/* Delayed work: when the service-cache window expires, push the
 * deferred EIR and Class of Device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache window was already closed */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1122
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is on, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1143
/* One-time per-controller mgmt initialization, performed on the first
 * mgmt command addressed to it (guarded by the HCI_MGMT flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1159
/* MGMT_OP_READ_INFO handler: report the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and names.
 *
 * Returns 0 on success or a negative errno.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1189
1190 static void mgmt_pending_free(struct pending_cmd *cmd)
1191 {
1192 sock_put(cmd->sk);
1193 kfree(cmd->param);
1194 kfree(cmd);
1195 }
1196
1197 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1198 struct hci_dev *hdev, void *data,
1199 u16 len)
1200 {
1201 struct pending_cmd *cmd;
1202
1203 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1204 if (!cmd)
1205 return NULL;
1206
1207 cmd->opcode = opcode;
1208 cmd->index = hdev->id;
1209
1210 cmd->param = kmemdup(data, len, GFP_KERNEL);
1211 if (!cmd->param) {
1212 kfree(cmd);
1213 return NULL;
1214 }
1215
1216 cmd->param_len = len;
1217
1218 cmd->sk = sk;
1219 sock_hold(sk);
1220
1221 list_add(&cmd->list, &hdev->mgmt_pending);
1222
1223 return cmd;
1224 }
1225
1226 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1227 void (*cb)(struct pending_cmd *cmd,
1228 void *data),
1229 void *data)
1230 {
1231 struct pending_cmd *cmd, *tmp;
1232
1233 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1234 if (opcode > 0 && cmd->opcode != opcode)
1235 continue;
1236
1237 cb(cmd, data);
1238 }
1239 }
1240
/* Unlink a pending command from its device list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1246
/* Reply to a settings-changing command with the controller's current
 * settings bitmask as the command-complete payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1254
/* Completion callback for the clean_up_hci_state() request: once no
 * connections remain, power off immediately rather than waiting for the
 * delayed power_off timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1264
/* Queue onto @req the HCI commands needed to abort whatever discovery
 * activity is ongoing (inquiry, LE scan or remote name resolution).
 *
 * Returns true if at least one command was queued (the caller should
 * then expect a discovery state transition), false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is running. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1306
/* Build and run the HCI request that quiesces the controller before a
 * power-off: disable page/inquiry scan and advertising, stop discovery,
 * and disconnect, cancel or reject every connection depending on its
 * stage.
 *
 * Returns the hci_req_run() result; -ENODATA means no HCI work was
 * needed at all.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Tear down each connection according to its current state. */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection still being set up: cancel. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting acceptance: reject. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1365
/* Set Powered (MGMT_OP_SET_POWERED) command handler.
 *
 * Powering on is deferred to the power_on work item. Powering off first
 * cleans up HCI state (disconnects, scans, advertising) and then
 * schedules the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller is only up for the auto-off grace period,
	 * powering on just means cancelling the pending auto-off and
	 * declaring the device powered.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* NOTE(review): the mgmt_pending_add() result is
			 * not checked here; on allocation failure there
			 * is no pending entry for the later response.
			 */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches reality - just respond. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1431
1432 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1433 {
1434 __le32 ev;
1435
1436 ev = cpu_to_le32(get_current_settings(hdev));
1437
1438 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1439 }
1440
/* Exported wrapper: broadcast New Settings to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1445
/* Context passed to the mgmt_pending_foreach() response callbacks:
 * collects the first responder's socket so a follow-up event can be
 * sent skipping it.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket responded to (ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1451
/* mgmt_pending_foreach() callback: answer a pending settings command,
 * remember the first responder's socket (taking a reference) in the
 * cmd_lookup context, then free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink manually; mgmt_pending_free() releases the rest. */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1467
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status code pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1475
1476 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 {
1478 if (cmd->cmd_complete) {
1479 u8 *status = data;
1480
1481 cmd->cmd_complete(cmd, *status);
1482 mgmt_pending_remove(cmd);
1483
1484 return;
1485 }
1486
1487 cmd_status_rsp(cmd, data);
1488 }
1489
/* Default cmd_complete handler: echo the stored request parameters
 * back as the command-complete payload.
 */
static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     cmd->param_len);
}
1495
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     sizeof(struct mgmt_addr_info));
}
1501
1502 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 {
1504 if (!lmp_bredr_capable(hdev))
1505 return MGMT_STATUS_NOT_SUPPORTED;
1506 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1507 return MGMT_STATUS_REJECTED;
1508 else
1509 return MGMT_STATUS_SUCCESS;
1510 }
1511
1512 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 {
1514 if (!lmp_le_capable(hdev))
1515 return MGMT_STATUS_NOT_SUPPORTED;
1516 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1517 return MGMT_STATUS_REJECTED;
1518 else
1519 return MGMT_STATUS_SUCCESS;
1520 }
1521
/* Request-completion callback for the SET_DISCOVERABLE HCI transaction.
 *
 * Responds to the pending mgmt command, syncs HCI_DISCOVERABLE with the
 * outcome, arms the discoverable timeout and refreshes page scan and
 * class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Limited discoverable was set optimistically before the
		 * request ran; undo it on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the automatic discoverable-off timer when a
		 * timeout was requested.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1580
/* Set Discoverable (MGMT_OP_SET_DISCOVERABLE) command handler.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable. A timeout is forbidden when disabling and required for
 * limited mode. Requires connectable to be enabled. While powered off
 * only the host flag is updated; otherwise the IAC LAP and scan enable
 * HCI commands are issued (BR/EDR) and/or the advertising data is
 * refreshed (LE).
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on top of connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00; /* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33; /* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33; /* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1745
/* Queue the page scan activity/type updates that make the controller
 * fast connectable (interlaced scan, shorter interval) or restore the
 * standard defaults. Commands are only queued when the parameters would
 * actually change, and only on BR/EDR-enabled 1.2+ controllers.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands need Bluetooth 1.2 or later. */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only send what differs from the controller's current values. */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1780
/* Request-completion callback for the SET_CONNECTABLE HCI transaction.
 *
 * Syncs HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) with
 * the outcome, responds to the pending command and refreshes page scan,
 * advertising data and background scanning when anything changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		/* Disabling connectable implicitly clears discoverable. */
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1829
/* Apply a connectable setting change that needs no HCI traffic (used
 * while powered off, or when the built request turned out empty).
 * Disabling connectable also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1858
/* Set Connectable (MGMT_OP_SET_CONNECTABLE) command handler.
 *
 * While powered off this is a pure settings update. Otherwise an HCI
 * request is built to adjust page scanning (BR/EDR), advertising data
 * (LE-only) and fast-connectable/advertising parameters as needed.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* An empty request (-ENODATA) means no HCI work was
		 * needed - fall back to a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1963
1964 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1965 u16 len)
1966 {
1967 struct mgmt_mode *cp = data;
1968 bool changed;
1969 int err;
1970
1971 BT_DBG("request for %s", hdev->name);
1972
1973 if (cp->val != 0x00 && cp->val != 0x01)
1974 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1975 MGMT_STATUS_INVALID_PARAMS);
1976
1977 hci_dev_lock(hdev);
1978
1979 if (cp->val)
1980 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1981 else
1982 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1983
1984 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1985 if (err < 0)
1986 goto unlock;
1987
1988 if (changed)
1989 err = new_settings(hdev, sk);
1990
1991 unlock:
1992 hci_dev_unlock(hdev);
1993 return err;
1994 }
1995
/* Set Link Security (MGMT_OP_SET_LINK_SECURITY) command handler.
 *
 * Toggles BR/EDR authentication via HCI_OP_WRITE_AUTH_ENABLE when the
 * controller is powered; while powered off only the HCI_LINK_SECURITY
 * flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just record the desired setting. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state - just respond. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2065
/* Set Secure Simple Pairing (MGMT_OP_SET_SSP) command handler.
 *
 * Issues HCI_OP_WRITE_SSP_MODE when the controller is powered; while
 * powered off only the host flags are updated. Disabling SSP also
 * disables High Speed and SSP debug-key usage.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip the host-side flags. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS depends on SSP; clear it too and count a
			 * cleared HS bit as a change in its own right.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just respond. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off the use of debug keys. */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2147
/* Set High Speed (MGMT_OP_SET_HS) command handler.
 *
 * A host-side only flag (no HCI command involved) that requires SSP to
 * be enabled. Disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2198
/* Completion callback for the SET_LE HCI transaction.
 *
 * On failure every pending SET_LE command gets a command status. On
 * success they get settings responses, New Settings is broadcast and,
 * when LE ended up enabled, the advertising data and background scan
 * are refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference taken by settings_rsp(). */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
2239
/* Set Low Energy (MGMT_OP_SET_LE) command handler.
 *
 * When the controller is powered and the host LE state needs changing,
 * issues HCI_OP_WRITE_LE_HOST_SUPPORTED (disabling advertising first
 * when turning LE off). Otherwise only the host flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the controller already matches the requested
	 * state: only host flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns off advertising. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Advertising is stopped before LE host support is
		 * turned off.
		 */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2328
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2334 */
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2336 {
2337 struct pending_cmd *cmd;
2338
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
2345 return true;
2346 }
2347 }
2348
2349 return false;
2350 }
2351
/* Bluetooth Base UUID in little-endian byte order; the first 12 bytes
 * are compared to decide whether a 128-bit UUID is really a shortened
 * 16/32-bit alias.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2356
2357 static u8 get_uuid_size(const u8 *uuid)
2358 {
2359 u32 val;
2360
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2362 return 128;
2363
2364 val = get_unaligned_le32(&uuid[12]);
2365 if (val > 0xffff)
2366 return 32;
2367
2368 return 16;
2369 }
2370
/* Finish a pending class/EIR related mgmt command (@mgmt_op) by sending
 * a Command Complete carrying the current 3-byte Class of Device, then
 * remove the pending entry. No-op if no such command is pending.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* The HCI status is translated to a mgmt status code; the device
	 * class is included in the response either way.
	 */
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2389
/* HCI request completion callback for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2396
/* Handler for the Add UUID mgmt command: record the UUID in hdev->uuids
 * and trigger the resulting Class of Device and EIR updates over HCI.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were actually queued
		 * (nothing changed), so respond immediately with success.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* HCI commands are in flight; defer the mgmt response until
	 * add_uuid_complete() runs.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2454
2455 static bool enable_service_cache(struct hci_dev *hdev)
2456 {
2457 if (!hdev_is_powered(hdev))
2458 return false;
2459
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2462 CACHE_TIMEOUT);
2463 return true;
2464 }
2465
2466 return false;
2467 }
2468
/* HCI request completion callback for Remove UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2475
/* Handler for the Remove UUID mgmt command. An all-zero UUID removes
 * every registered UUID; otherwise all entries matching the given UUID
 * are deleted. Class of Device and EIR are updated over HCI afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: clear everything. If the service cache timer was
	 * armed here, the actual HCI update is deferred to the cache
	 * flush and we can respond immediately.
	 */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands were queued, respond now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the response until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2553
/* HCI request completion callback for Set Device Class. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2560
/* Handler for the Set Device Class mgmt command (BR/EDR only). Stores
 * the new major/minor class and, if powered, pushes the update (and a
 * possibly pending EIR refresh) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just remember the values and respond */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* The lock is dropped around the synchronous cancel -
		 * NOTE(review): presumably because the service cache work
		 * item takes the hdev lock itself; confirm against
		 * service_cache_off() before changing this ordering.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands were queued, respond now */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the response until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2631
/* Handler for the Load Link Keys mgmt command: replace the stored
 * BR/EDR link keys with the supplied list and update the "keep debug
 * keys" policy flag.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2713
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2716 {
2717 struct mgmt_ev_device_unpaired ev;
2718
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2721
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2723 skip_sk);
2724 }
2725
/* Handler for the Unpair Device mgmt command: remove all stored keys
 * (link key for BR/EDR; IRK and LTK for LE) and optionally disconnect
 * the remote device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Translate the mgmt address type to the HCI one */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was stored for this address */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the mgmt response until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2838
/* Handler for the Disconnect mgmt command: terminate the ACL or LE
 * connection to the given address. The response is deferred until the
 * disconnection completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2901
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2903 {
2904 switch (link_type) {
2905 case LE_LINK:
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2909
2910 default:
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2913 }
2914
2915 default:
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
2918 }
2919 }
2920
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2922 u16 data_len)
2923 {
2924 struct mgmt_rp_get_connections *rp;
2925 struct hci_conn *c;
2926 size_t rp_len;
2927 int err;
2928 u16 i;
2929
2930 BT_DBG("");
2931
2932 hci_dev_lock(hdev);
2933
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
2937 goto unlock;
2938 }
2939
2940 i = 0;
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2943 i++;
2944 }
2945
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
2948 if (!rp) {
2949 err = -ENOMEM;
2950 goto unlock;
2951 }
2952
2953 i = 0;
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2956 continue;
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2960 continue;
2961 i++;
2962 }
2963
2964 rp->conn_count = cpu_to_le16(i);
2965
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2968
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2970 rp_len);
2971
2972 kfree(rp);
2973
2974 unlock:
2975 hci_dev_unlock(hdev);
2976 return err;
2977 }
2978
/* Queue a PIN Code Negative Reply to the controller and register a
 * pending mgmt command so the response can be sent on completion.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2997
/* Handler for the PIN Code Reply mgmt command: forward the user supplied
 * PIN to the controller, or send a negative reply if a 16-byte PIN is
 * required (high security) but a shorter one was supplied.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3059
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3061 u16 len)
3062 {
3063 struct mgmt_cp_set_io_capability *cp = data;
3064
3065 BT_DBG("");
3066
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3070
3071 hci_dev_lock(hdev);
3072
3073 hdev->io_capability = cp->io_capability;
3074
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3077
3078 hci_dev_unlock(hdev);
3079
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3081 0);
3082 }
3083
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3085 {
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3088
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3091 continue;
3092
3093 if (cmd->user_data != conn)
3094 continue;
3095
3096 return cmd;
3097 }
3098
3099 return NULL;
3100 }
3101
/* Finish a Pair Device mgmt command: send the response, detach all
 * pairing callbacks from the connection and drop the references the
 * command held on it.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);
}
3127
3128 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3129 {
3130 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3131 struct pending_cmd *cmd;
3132
3133 cmd = find_pairing(conn);
3134 if (cmd) {
3135 cmd->cmd_complete(cmd, status);
3136 mgmt_pending_remove(cmd);
3137 }
3138 }
3139
/* Connection callback used for BR/EDR pairings: any connect/security/
 * disconnect event resolves the pending Pair Device command.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3155
/* Connection callback used for LE pairings. Only failures resolve the
 * command here: success is signalled by SMP via mgmt_smp_complete(),
 * since merely connecting does not prove the pairing finished.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3174
/* Handler for the Pair Device mgmt command: establish an ACL or LE
 * connection to the target and initiate authentication/pairing on it.
 * The mgmt response is deferred until the pairing outcome is known.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect callback means another pairing is already
	 * using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secured - complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3297
/* Handler for the Cancel Pair Device mgmt command: abort the pending
 * Pair Device command for the given address with status Cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the ongoing pairing's target */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3340
/* Common helper for all user pairing responses (PIN/confirm/passkey,
 * positive and negative). For LE addresses the reply is handed to SMP
 * directly; for BR/EDR the corresponding HCI command (@hci_op) is sent
 * and the mgmt response deferred until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3410
/* Handler for the PIN Code Negative Reply mgmt command. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3422
/* Handler for the User Confirmation Reply mgmt command. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command carries no variable-length fields, so the length
	 * must match the struct exactly.
	 */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3438
/* Handler for the User Confirmation Negative Reply mgmt command. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3450
/* Handler for the User Passkey Reply mgmt command. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3462
/* Handler for the User Passkey Negative Reply mgmt command. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3474
/* Queue an HCI Write Local Name command carrying hdev->dev_name. */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3484
/* HCI request completion callback for Set Local Name: answer the
 * pending mgmt command with the stored request parameters on success,
 * or with a translated error status on failure.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3512
/* Handler for the Set Local Name mgmt command: update the complete and
 * short device names and, if powered, push the name to the controller
 * (local name, EIR and LE scan response data as applicable).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify other sockets */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Defer the response until set_name_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3581
/* Handler for the Read Local OOB Data mgmt command: ask the controller
 * for its out-of-band pairing data (extended variant if BR/EDR Secure
 * Connections is enabled). The response is deferred to the HCI event.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3629
3630 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3631 void *data, u16 len)
3632 {
3633 int err;
3634
3635 BT_DBG("%s ", hdev->name);
3636
3637 hci_dev_lock(hdev);
3638
3639 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3640 struct mgmt_cp_add_remote_oob_data *cp = data;
3641 u8 status;
3642
3643 if (cp->addr.type != BDADDR_BREDR) {
3644 err = cmd_complete(sk, hdev->id,
3645 MGMT_OP_ADD_REMOTE_OOB_DATA,
3646 MGMT_STATUS_INVALID_PARAMS,
3647 &cp->addr, sizeof(cp->addr));
3648 goto unlock;
3649 }
3650
3651 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3652 cp->addr.type, cp->hash,
3653 cp->rand, NULL, NULL);
3654 if (err < 0)
3655 status = MGMT_STATUS_FAILED;
3656 else
3657 status = MGMT_STATUS_SUCCESS;
3658
3659 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3660 status, &cp->addr, sizeof(cp->addr));
3661 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3662 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3663 u8 *rand192, *hash192;
3664 u8 status;
3665
3666 if (cp->addr.type != BDADDR_BREDR) {
3667 err = cmd_complete(sk, hdev->id,
3668 MGMT_OP_ADD_REMOTE_OOB_DATA,
3669 MGMT_STATUS_INVALID_PARAMS,
3670 &cp->addr, sizeof(cp->addr));
3671 goto unlock;
3672 }
3673
3674 if (bdaddr_type_is_le(cp->addr.type)) {
3675 rand192 = NULL;
3676 hash192 = NULL;
3677 } else {
3678 rand192 = cp->rand192;
3679 hash192 = cp->hash192;
3680 }
3681
3682 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3683 cp->addr.type, hash192, rand192,
3684 cp->hash256, cp->rand256);
3685 if (err < 0)
3686 status = MGMT_STATUS_FAILED;
3687 else
3688 status = MGMT_STATUS_SUCCESS;
3689
3690 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3691 status, &cp->addr, sizeof(cp->addr));
3692 } else {
3693 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3694 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3695 MGMT_STATUS_INVALID_PARAMS);
3696 }
3697
3698 unlock:
3699 hci_dev_unlock(hdev);
3700 return err;
3701 }
3702
3703 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3704 void *data, u16 len)
3705 {
3706 struct mgmt_cp_remove_remote_oob_data *cp = data;
3707 u8 status;
3708 int err;
3709
3710 BT_DBG("%s", hdev->name);
3711
3712 if (cp->addr.type != BDADDR_BREDR)
3713 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3714 MGMT_STATUS_INVALID_PARAMS,
3715 &cp->addr, sizeof(cp->addr));
3716
3717 hci_dev_lock(hdev);
3718
3719 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3720 hci_remote_oob_data_clear(hdev);
3721 status = MGMT_STATUS_SUCCESS;
3722 goto done;
3723 }
3724
3725 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3726 if (err < 0)
3727 status = MGMT_STATUS_INVALID_PARAMS;
3728 else
3729 status = MGMT_STATUS_SUCCESS;
3730
3731 done:
3732 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3733 status, &cp->addr, sizeof(cp->addr));
3734
3735 hci_dev_unlock(hdev);
3736 return err;
3737 }
3738
/* Queue onto @req the HCI commands needed to start discovery of the
 * type currently selected in hdev->discovery.type.
 *
 * Returns true when commands were queued; returns false on failure
 * with the mgmt error code stored through @status.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* Refuse while an inquiry is already in progress */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery additionally needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3835
/* HCI request callback shared by Start Discovery and Start Service
 * Discovery: replies to the pending mgmt command, updates discovery
 * state and, for LE-based discovery, arms the delayed scan disable.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may be pending */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* LE scanning has to be turned off by us later, so pick a
	 * timeout; a zero timeout means no timer is scheduled.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
3884
/* Handler for the Start Discovery mgmt command: validates device and
 * discovery state, records a pending command and kicks off the HCI
 * request built by trigger_discovery().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Final state transition happens in start_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3950
3951 static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3952 {
3953 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
3954 }
3955
/* Handler for the Start Service Discovery mgmt command: like Start
 * Discovery but with an RSSI threshold and an optional UUID filter
 * list whose length is validated against the packet size.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Largest UUID count that still fits in a u16-sized packet */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* Sanity-check the user-supplied UUID count before trusting
	 * it for the length computation below.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter (16 bytes each) */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4063
4064 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4065 {
4066 struct pending_cmd *cmd;
4067
4068 BT_DBG("status %d", status);
4069
4070 hci_dev_lock(hdev);
4071
4072 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4073 if (cmd) {
4074 cmd->cmd_complete(cmd, mgmt_status(status));
4075 mgmt_pending_remove(cmd);
4076 }
4077
4078 if (!status)
4079 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4080
4081 hci_dev_unlock(hdev);
4082 }
4083
/* Handler for the Stop Discovery mgmt command: aborts an active
 * discovery of the matching type.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4141
4142 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4143 u16 len)
4144 {
4145 struct mgmt_cp_confirm_name *cp = data;
4146 struct inquiry_entry *e;
4147 int err;
4148
4149 BT_DBG("%s", hdev->name);
4150
4151 hci_dev_lock(hdev);
4152
4153 if (!hci_discovery_active(hdev)) {
4154 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4155 MGMT_STATUS_FAILED, &cp->addr,
4156 sizeof(cp->addr));
4157 goto failed;
4158 }
4159
4160 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4161 if (!e) {
4162 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4163 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4164 sizeof(cp->addr));
4165 goto failed;
4166 }
4167
4168 if (cp->name_known) {
4169 e->name_state = NAME_KNOWN;
4170 list_del(&e->list);
4171 } else {
4172 e->name_state = NAME_NEEDED;
4173 hci_inquiry_cache_update_resolve(hdev, e);
4174 }
4175
4176 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4177 sizeof(cp->addr));
4178
4179 failed:
4180 hci_dev_unlock(hdev);
4181 return err;
4182 }
4183
4184 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4185 u16 len)
4186 {
4187 struct mgmt_cp_block_device *cp = data;
4188 u8 status;
4189 int err;
4190
4191 BT_DBG("%s", hdev->name);
4192
4193 if (!bdaddr_type_is_valid(cp->addr.type))
4194 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4195 MGMT_STATUS_INVALID_PARAMS,
4196 &cp->addr, sizeof(cp->addr));
4197
4198 hci_dev_lock(hdev);
4199
4200 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4201 cp->addr.type);
4202 if (err < 0) {
4203 status = MGMT_STATUS_FAILED;
4204 goto done;
4205 }
4206
4207 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4208 sk);
4209 status = MGMT_STATUS_SUCCESS;
4210
4211 done:
4212 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4213 &cp->addr, sizeof(cp->addr));
4214
4215 hci_dev_unlock(hdev);
4216
4217 return err;
4218 }
4219
4220 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4221 u16 len)
4222 {
4223 struct mgmt_cp_unblock_device *cp = data;
4224 u8 status;
4225 int err;
4226
4227 BT_DBG("%s", hdev->name);
4228
4229 if (!bdaddr_type_is_valid(cp->addr.type))
4230 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4231 MGMT_STATUS_INVALID_PARAMS,
4232 &cp->addr, sizeof(cp->addr));
4233
4234 hci_dev_lock(hdev);
4235
4236 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4237 cp->addr.type);
4238 if (err < 0) {
4239 status = MGMT_STATUS_INVALID_PARAMS;
4240 goto done;
4241 }
4242
4243 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4244 sk);
4245 status = MGMT_STATUS_SUCCESS;
4246
4247 done:
4248 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4249 &cp->addr, sizeof(cp->addr));
4250
4251 hci_dev_unlock(hdev);
4252
4253 return err;
4254 }
4255
4256 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4257 u16 len)
4258 {
4259 struct mgmt_cp_set_device_id *cp = data;
4260 struct hci_request req;
4261 int err;
4262 __u16 source;
4263
4264 BT_DBG("%s", hdev->name);
4265
4266 source = __le16_to_cpu(cp->source);
4267
4268 if (source > 0x0002)
4269 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4270 MGMT_STATUS_INVALID_PARAMS);
4271
4272 hci_dev_lock(hdev);
4273
4274 hdev->devid_source = source;
4275 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4276 hdev->devid_product = __le16_to_cpu(cp->product);
4277 hdev->devid_version = __le16_to_cpu(cp->version);
4278
4279 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4280
4281 hci_req_init(&req, hdev);
4282 update_eir(&req);
4283 hci_req_run(&req, NULL);
4284
4285 hci_dev_unlock(hdev);
4286
4287 return err;
4288 }
4289
/* HCI request callback for Set Advertising: synchronizes the
 * HCI_ADVERTISING setting flag with the controller state and answers
 * all pending Set Advertising commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual advertising state into the setting flag */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4320
/* Handler for the Set Advertising mgmt command: toggles LE
 * advertising, either by just flipping the setting flag (when no HCI
 * traffic is possible/needed) or by issuing an HCI request.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while a conflicting command is still in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4400
4401 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4402 void *data, u16 len)
4403 {
4404 struct mgmt_cp_set_static_address *cp = data;
4405 int err;
4406
4407 BT_DBG("%s", hdev->name);
4408
4409 if (!lmp_le_capable(hdev))
4410 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4411 MGMT_STATUS_NOT_SUPPORTED);
4412
4413 if (hdev_is_powered(hdev))
4414 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4415 MGMT_STATUS_REJECTED);
4416
4417 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4418 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4419 return cmd_status(sk, hdev->id,
4420 MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_INVALID_PARAMS);
4422
4423 /* Two most significant bits shall be set */
4424 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4425 return cmd_status(sk, hdev->id,
4426 MGMT_OP_SET_STATIC_ADDRESS,
4427 MGMT_STATUS_INVALID_PARAMS);
4428 }
4429
4430 hci_dev_lock(hdev);
4431
4432 bacpy(&hdev->static_addr, &cp->bdaddr);
4433
4434 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4435
4436 hci_dev_unlock(hdev);
4437
4438 return err;
4439 }
4440
4441 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4442 void *data, u16 len)
4443 {
4444 struct mgmt_cp_set_scan_params *cp = data;
4445 __u16 interval, window;
4446 int err;
4447
4448 BT_DBG("%s", hdev->name);
4449
4450 if (!lmp_le_capable(hdev))
4451 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4452 MGMT_STATUS_NOT_SUPPORTED);
4453
4454 interval = __le16_to_cpu(cp->interval);
4455
4456 if (interval < 0x0004 || interval > 0x4000)
4457 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4458 MGMT_STATUS_INVALID_PARAMS);
4459
4460 window = __le16_to_cpu(cp->window);
4461
4462 if (window < 0x0004 || window > 0x4000)
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4464 MGMT_STATUS_INVALID_PARAMS);
4465
4466 if (window > interval)
4467 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4468 MGMT_STATUS_INVALID_PARAMS);
4469
4470 hci_dev_lock(hdev);
4471
4472 hdev->le_scan_interval = interval;
4473 hdev->le_scan_window = window;
4474
4475 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4476
4477 /* If background scan is running, restart it so new parameters are
4478 * loaded.
4479 */
4480 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4481 hdev->discovery.state == DISCOVERY_STOPPED) {
4482 struct hci_request req;
4483
4484 hci_req_init(&req, hdev);
4485
4486 hci_req_add_le_scan_disable(&req);
4487 hci_req_add_le_passive_scan(&req);
4488
4489 hci_req_run(&req, NULL);
4490 }
4491
4492 hci_dev_unlock(hdev);
4493
4494 return err;
4495 }
4496
/* HCI request callback for Set Fast Connectable: updates the setting
 * flag according to the requested value and replies to the pending
 * mgmt command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4529
/* Handler for the Set Fast Connectable mgmt command: adjusts the
 * page scan parameters via write_fast_connectable(). Requires BR/EDR
 * enabled, a controller >= 1.2, powered and connectable state.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the mode is already as requested */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4594
/* HCI request callback for Set BR/EDR: on failure, rolls back the
 * HCI_BREDR_ENABLED flag that set_bredr() flipped in advance.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4626
/* Handler for the Set BR/EDR mgmt command: enables/disables BR/EDR on
 * a dual-mode (BR/EDR + LE) controller. Disabling is only allowed
 * while powered off.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested mode: just acknowledge */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* set_bredr_complete() rolls the flag back on failure */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4716
/* Handler for the Set Secure Connections mgmt command. Accepted
 * values: 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Supported when LE is enabled, the controller is SC capable,
	 * or the SC debugfs override is set.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When no HCI command is needed (powered off, no BR/EDR SC
	 * support), just toggle the flags and respond directly.
	 */
	if (!hdev_is_powered(hdev) ||
	    (!lmp_sc_capable(hdev) &&
	     !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just acknowledge */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Record the SC-only preference immediately; the enabled flag
	 * itself is updated when the HCI command completes.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4802
/* Handle the Set Debug Keys mgmt command.
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep and actively use debug keys (enables SSP debug mode on a
 * powered, SSP-enabled controller). Always replies with settings.
 */
4803 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4804 void *data, u16 len)
4805 {
4806 struct mgmt_mode *cp = data;
4807 bool changed, use_changed;
4808 int err;
4809
4810 BT_DBG("request for %s", hdev->name);
4811
4812 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4813 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4814 MGMT_STATUS_INVALID_PARAMS);
4815
4816 hci_dev_lock(hdev);
4817
/* Any non-zero value means debug keys are kept in storage. */
4818 if (cp->val)
4819 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4820 &hdev->dev_flags);
4821 else
4822 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4823 &hdev->dev_flags);
4824
/* Only 0x02 turns on actual use of debug keys for pairing. */
4825 if (cp->val == 0x02)
4826 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4827 &hdev->dev_flags);
4828 else
4829 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4830 &hdev->dev_flags);
4831
/* Sync SSP debug mode to the controller; the return value of
 * hci_send_cmd() is intentionally ignored (best effort).
 */
4832 if (hdev_is_powered(hdev) && use_changed &&
4833 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4834 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4835 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4836 sizeof(mode), &mode);
4837 }
4838
4839 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4840 if (err < 0)
4841 goto unlock;
4842
4843 if (changed)
4844 err = new_settings(hdev, sk);
4845
4846 unlock:
4847 hci_dev_unlock(hdev);
4848 return err;
4849 }
4850
/* Handle the Set Privacy mgmt command.
 * Stores (or clears) the local Identity Resolving Key and toggles the
 * HCI_PRIVACY flag. Only allowed while the controller is powered off,
 * so the new IRK/RPA state takes effect on the next power-on.
 */
4851 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4852 u16 len)
4853 {
4854 struct mgmt_cp_set_privacy *cp = cp_data;
4855 bool changed;
4856 int err;
4857
4858 BT_DBG("request for %s", hdev->name);
4859
4860 if (!lmp_le_capable(hdev))
4861 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4862 MGMT_STATUS_NOT_SUPPORTED);
4863
4864 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4865 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4866 MGMT_STATUS_INVALID_PARAMS);
4867
4868 if (hdev_is_powered(hdev))
4869 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4870 MGMT_STATUS_REJECTED);
4871
4872 hci_dev_lock(hdev);
4873
4874 /* If user space supports this command it is also expected to
4875 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4876 */
4877 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4878
/* Enabling: store the IRK and mark the RPA as expired so a fresh
 * resolvable private address is generated. Disabling: wipe the IRK.
 */
4879 if (cp->privacy) {
4880 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4881 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4882 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4883 } else {
4884 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4885 memset(hdev->irk, 0, sizeof(hdev->irk));
4886 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4887 }
4888
4889 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4890 if (err < 0)
4891 goto unlock;
4892
4893 if (changed)
4894 err = new_settings(hdev, sk);
4895
4896 unlock:
4897 hci_dev_unlock(hdev);
4898 return err;
4899 }
4900
4901 static bool irk_is_valid(struct mgmt_irk_info *irk)
4902 {
4903 switch (irk->addr.type) {
4904 case BDADDR_LE_PUBLIC:
4905 return true;
4906
4907 case BDADDR_LE_RANDOM:
4908 /* Two most significant bits shall be set */
4909 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4910 return false;
4911 return true;
4912 }
4913
4914 return false;
4915 }
4916
/* Handle the Load IRKs mgmt command.
 * Validates the variable-length list of IRK entries, then atomically
 * replaces the stored IRK list (clear + re-add) under hci_dev_lock().
 */
4917 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4918 u16 len)
4919 {
4920 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound that keeps sizeof(*cp) + count * entry_size within U16_MAX,
 * preventing overflow of the u16 expected_len computation below.
 */
4921 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4922 sizeof(struct mgmt_irk_info));
4923 u16 irk_count, expected_len;
4924 int i, err;
4925
4926 BT_DBG("request for %s", hdev->name);
4927
4928 if (!lmp_le_capable(hdev))
4929 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4930 MGMT_STATUS_NOT_SUPPORTED);
4931
4932 irk_count = __le16_to_cpu(cp->irk_count);
4933 if (irk_count > max_irk_count) {
4934 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4935 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4936 MGMT_STATUS_INVALID_PARAMS);
4937 }
4938
/* The declared entry count must exactly match the payload length. */
4939 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4940 if (expected_len != len) {
4941 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4942 expected_len, len);
4943 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4944 MGMT_STATUS_INVALID_PARAMS);
4945 }
4946
4947 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4948
/* Validate every entry before touching the stored list so a bad entry
 * cannot leave the list half-replaced.
 */
4949 for (i = 0; i < irk_count; i++) {
4950 struct mgmt_irk_info *key = &cp->irks[i];
4951
4952 if (!irk_is_valid(key))
4953 return cmd_status(sk, hdev->id,
4954 MGMT_OP_LOAD_IRKS,
4955 MGMT_STATUS_INVALID_PARAMS);
4956 }
4957
4958 hci_dev_lock(hdev);
4959
4960 hci_smp_irks_clear(hdev);
4961
4962 for (i = 0; i < irk_count; i++) {
4963 struct mgmt_irk_info *irk = &cp->irks[i];
4964 u8 addr_type;
4965
4966 if (irk->addr.type == BDADDR_LE_PUBLIC)
4967 addr_type = ADDR_LE_DEV_PUBLIC;
4968 else
4969 addr_type = ADDR_LE_DEV_RANDOM;
4970
4971 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4972 BDADDR_ANY);
4973 }
4974
/* User space that loads IRKs is expected to resolve RPAs itself. */
4975 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4976
4977 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4978
4979 hci_dev_unlock(hdev);
4980
4981 return err;
4982 }
4983
4984 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4985 {
4986 if (key->master != 0x00 && key->master != 0x01)
4987 return false;
4988
4989 switch (key->addr.type) {
4990 case BDADDR_LE_PUBLIC:
4991 return true;
4992
4993 case BDADDR_LE_RANDOM:
4994 /* Two most significant bits shall be set */
4995 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4996 return false;
4997 return true;
4998 }
4999
5000 return false;
5001 }
5002
/* Handle the Load Long Term Keys mgmt command.
 * Validates the variable-length key list, then replaces the stored SMP
 * LTK list (clear + re-add) under hci_dev_lock().
 */
5003 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5004 void *cp_data, u16 len)
5005 {
5006 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeping sizeof(*cp) + count * entry_size within U16_MAX
 * so the u16 expected_len computation below cannot overflow.
 */
5007 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5008 sizeof(struct mgmt_ltk_info));
5009 u16 key_count, expected_len;
5010 int i, err;
5011
5012 BT_DBG("request for %s", hdev->name);
5013
5014 if (!lmp_le_capable(hdev))
5015 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5016 MGMT_STATUS_NOT_SUPPORTED);
5017
5018 key_count = __le16_to_cpu(cp->key_count);
5019 if (key_count > max_key_count) {
5020 BT_ERR("load_ltks: too big key_count value %u", key_count);
5021 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5022 MGMT_STATUS_INVALID_PARAMS);
5023 }
5024
5025 expected_len = sizeof(*cp) + key_count *
5026 sizeof(struct mgmt_ltk_info);
5027 if (expected_len != len) {
5028 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5029 expected_len, len);
5030 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5031 MGMT_STATUS_INVALID_PARAMS);
5032 }
5033
5034 BT_DBG("%s key_count %u", hdev->name, key_count);
5035
/* Validate all entries up front so a bad entry cannot leave the key
 * list half-replaced.
 */
5036 for (i = 0; i < key_count; i++) {
5037 struct mgmt_ltk_info *key = &cp->keys[i];
5038
5039 if (!ltk_is_valid(key))
5040 return cmd_status(sk, hdev->id,
5041 MGMT_OP_LOAD_LONG_TERM_KEYS,
5042 MGMT_STATUS_INVALID_PARAMS);
5043 }
5044
5045 hci_dev_lock(hdev);
5046
5047 hci_smp_ltks_clear(hdev);
5048
5049 for (i = 0; i < key_count; i++) {
5050 struct mgmt_ltk_info *key = &cp->keys[i];
5051 u8 type, addr_type, authenticated;
5052
5053 if (key->addr.type == BDADDR_LE_PUBLIC)
5054 addr_type = ADDR_LE_DEV_PUBLIC;
5055 else
5056 addr_type = ADDR_LE_DEV_RANDOM;
5057
5058 switch (key->type) {
5059 case MGMT_LTK_UNAUTHENTICATED:
5060 authenticated = 0x00;
5061 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5062 break;
5063 case MGMT_LTK_AUTHENTICATED:
5064 authenticated = 0x01;
5065 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5066 break;
5067 case MGMT_LTK_P256_UNAUTH:
5068 authenticated = 0x00;
5069 type = SMP_LTK_P256;
5070 break;
5071 case MGMT_LTK_P256_AUTH:
5072 authenticated = 0x01;
5073 type = SMP_LTK_P256;
5074 break;
5075 case MGMT_LTK_P256_DEBUG:
5076 authenticated = 0x00;
5077 type = SMP_LTK_P256_DEBUG;
/* NOTE(review): no break here — this case falls through to the default
 * branch, so P256 debug keys (and any unknown type) are skipped and
 * never stored, making the two assignments above dead. Later mainline
 * kernels keep this skip and mark it as an intentional fallthrough;
 * confirm that is the intent here too.
 */
5078 default:
5079 continue;
5080 }
5081
5082 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5083 authenticated, key->val, key->enc_size, key->ediv,
5084 key->rand);
5085 }
5086
5087 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5088 NULL, 0);
5089
5090 hci_dev_unlock(hdev);
5091
5092 return err;
5093 }
5094
/* Completion callback for a pending Get Connection Information command.
 * Copies the requested address from the stored request (cmd->param),
 * fills in RSSI/TX-power from the hci_conn on success (invalid markers on
 * failure), sends the reply, and drops the connection references taken
 * when the command was queued (hci_conn_hold()/hci_conn_get()).
 */
5095 static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5096 {
5097 struct hci_conn *conn = cmd->user_data;
5098 struct mgmt_rp_get_conn_info rp;
5099
/* cmd->param is the copied mgmt_cp_get_conn_info, which begins with
 * the address info — exactly what the reply must echo back.
 */
5100 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5101
5102 if (status == MGMT_STATUS_SUCCESS) {
5103 rp.rssi = conn->rssi;
5104 rp.tx_power = conn->tx_power;
5105 rp.max_tx_power = conn->max_tx_power;
5106 } else {
5107 rp.rssi = HCI_RSSI_INVALID;
5108 rp.tx_power = HCI_TX_POWER_INVALID;
5109 rp.max_tx_power = HCI_TX_POWER_INVALID;
5110 }
5111
5112 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5113 &rp, sizeof(rp));
5114
5115 hci_conn_drop(conn);
5116 hci_conn_put(conn);
5117 }
5118
/* HCI request completion handler for the Read RSSI / Read TX Power
 * request issued by get_conn_info(). Recovers the connection handle from
 * the last sent command and completes the matching pending mgmt command.
 */
5119 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
5120 {
5121 struct hci_cp_read_rssi *cp;
5122 struct pending_cmd *cmd;
5123 struct hci_conn *conn;
5124 u16 handle;
5125 u8 status;
5126
5127 BT_DBG("status 0x%02x", hci_status);
5128
5129 hci_dev_lock(hdev);
5130
5131 /* Commands sent in request are either Read RSSI or Read Transmit Power
5132 * Level so we check which one was last sent to retrieve connection
5133 * handle. Both commands have handle as first parameter so it's safe to
5134 * cast data on the same command struct.
5135 *
5136 * First command sent is always Read RSSI and we fail only if it fails.
5137 * In other case we simply override error to indicate success as we
5138 * already remembered if TX power value is actually valid.
5139 */
5140 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5141 if (!cp) {
5142 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5143 status = MGMT_STATUS_SUCCESS;
5144 } else {
5145 status = mgmt_status(hci_status);
5146 }
5147
/* Neither command matches the last sent one — nothing to complete. */
5148 if (!cp) {
5149 BT_ERR("invalid sent_cmd in conn_info response");
5150 goto unlock;
5151 }
5152
5153 handle = __le16_to_cpu(cp->handle);
5154 conn = hci_conn_hash_lookup_handle(hdev, handle);
5155 if (!conn) {
5156 BT_ERR("unknown handle (%d) in conn_info response", handle);
5157 goto unlock;
5158 }
5159
/* Match the pending command by the connection it was queued for. */
5160 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5161 if (!cmd)
5162 goto unlock;
5163
5164 cmd->cmd_complete(cmd, status);
5165 mgmt_pending_remove(cmd);
5166
5167 unlock:
5168 hci_dev_unlock(hdev);
5169 }
5170
/* Handle the Get Connection Information mgmt command.
 * Replies immediately from the cached RSSI/TX-power values when they are
 * still fresh; otherwise issues an HCI request (Read RSSI plus, when
 * needed, Read TX Power) and queues a pending command completed by
 * conn_info_refresh_complete().
 */
5171 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5172 u16 len)
5173 {
5174 struct mgmt_cp_get_conn_info *cp = data;
5175 struct mgmt_rp_get_conn_info rp;
5176 struct hci_conn *conn;
5177 unsigned long conn_info_age;
5178 int err = 0;
5179
5180 BT_DBG("%s", hdev->name);
5181
/* Prepare the reply address up front: every response (including error
 * responses) echoes the requested address back.
 */
5182 memset(&rp, 0, sizeof(rp));
5183 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5184 rp.addr.type = cp->addr.type;
5185
5186 if (!bdaddr_type_is_valid(cp->addr.type))
5187 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5188 MGMT_STATUS_INVALID_PARAMS,
5189 &rp, sizeof(rp));
5190
5191 hci_dev_lock(hdev);
5192
5193 if (!hdev_is_powered(hdev)) {
5194 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5195 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5196 goto unlock;
5197 }
5198
5199 if (cp->addr.type == BDADDR_BREDR)
5200 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5201 &cp->addr.bdaddr);
5202 else
5203 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5204
5205 if (!conn || conn->state != BT_CONNECTED) {
5206 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5207 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5208 goto unlock;
5209 }
5210
/* Only one refresh per connection may be in flight. */
5211 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5212 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5213 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5214 goto unlock;
5215 }
5216
5217 /* To avoid client trying to guess when to poll again for information we
5218 * calculate conn info age as random value between min/max set in hdev.
5219 */
5220 conn_info_age = hdev->conn_info_min_age +
5221 prandom_u32_max(hdev->conn_info_max_age -
5222 hdev->conn_info_min_age);
5223
5224 /* Query controller to refresh cached values if they are too old or were
5225 * never read.
5226 */
5227 if (time_after(jiffies, conn->conn_info_timestamp +
5228 msecs_to_jiffies(conn_info_age)) ||
5229 !conn->conn_info_timestamp) {
5230 struct hci_request req;
5231 struct hci_cp_read_tx_power req_txp_cp;
5232 struct hci_cp_read_rssi req_rssi_cp;
5233 struct pending_cmd *cmd;
5234
/* Read RSSI is always first in the request; the completion handler
 * relies on that ordering to pick its status.
 */
5235 hci_req_init(&req, hdev);
5236 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5237 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5238 &req_rssi_cp);
5239
5240 /* For LE links TX power does not change thus we don't need to
5241 * query for it once value is known.
5242 */
5243 if (!bdaddr_type_is_le(cp->addr.type) ||
5244 conn->tx_power == HCI_TX_POWER_INVALID) {
5245 req_txp_cp.handle = cpu_to_le16(conn->handle);
5246 req_txp_cp.type = 0x00;
5247 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5248 sizeof(req_txp_cp), &req_txp_cp);
5249 }
5250
5251 /* Max TX power needs to be read only once per connection */
5252 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5253 req_txp_cp.handle = cpu_to_le16(conn->handle);
5254 req_txp_cp.type = 0x01;
5255 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5256 sizeof(req_txp_cp), &req_txp_cp);
5257 }
5258
5259 err = hci_req_run(&req, conn_info_refresh_complete);
5260 if (err < 0)
5261 goto unlock;
5262
5263 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5264 data, len);
5265 if (!cmd) {
5266 err = -ENOMEM;
5267 goto unlock;
5268 }
5269
/* Hold both a usage and a lifetime reference on the connection; they
 * are released in conn_info_cmd_complete().
 */
5270 hci_conn_hold(conn);
5271 cmd->user_data = hci_conn_get(conn);
5272 cmd->cmd_complete = conn_info_cmd_complete;
5273
5274 conn->conn_info_timestamp = jiffies;
5275 } else {
5276 /* Cache is valid, just reply with values cached in hci_conn */
5277 rp.rssi = conn->rssi;
5278 rp.tx_power = conn->tx_power;
5279 rp.max_tx_power = conn->max_tx_power;
5280
5281 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5282 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5283 }
5284
5285 unlock:
5286 hci_dev_unlock(hdev);
5287 return err;
5288 }
5289
5290 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5291 {
5292 struct hci_conn *conn = cmd->user_data;
5293 struct mgmt_rp_get_clock_info rp;
5294 struct hci_dev *hdev;
5295
5296 memset(&rp, 0, sizeof(rp));
5297 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5298
5299 if (status)
5300 goto complete;
5301
5302 hdev = hci_dev_get(cmd->index);
5303 if (hdev) {
5304 rp.local_clock = cpu_to_le32(hdev->clock);
5305 hci_dev_put(hdev);
5306 }
5307
5308 if (conn) {
5309 rp.piconet_clock = cpu_to_le32(conn->clock);
5310 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5311 }
5312
5313 complete:
5314 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5315
5316 if (conn) {
5317 hci_conn_drop(conn);
5318 hci_conn_put(conn);
5319 }
5320 }
5321
/* HCI request completion handler for the Read Clock request issued by
 * get_clock_info(). Recovers the connection (if a piconet clock was
 * requested) from the last sent command and completes the matching
 * pending mgmt command.
 */
5322 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5323 {
5324 struct hci_cp_read_clock *hci_cp;
5325 struct pending_cmd *cmd;
5326 struct hci_conn *conn;
5327
5328 BT_DBG("%s status %u", hdev->name, status);
5329
5330 hci_dev_lock(hdev);
5331
5332 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5333 if (!hci_cp)
5334 goto unlock;
5335
/* which != 0 means the piconet clock of a specific connection was
 * read; look the connection up by its handle. which == 0 is the
 * local-clock-only case and carries no connection.
 */
5336 if (hci_cp->which) {
5337 u16 handle = __le16_to_cpu(hci_cp->handle);
5338 conn = hci_conn_hash_lookup_handle(hdev, handle);
5339 } else {
5340 conn = NULL;
5341 }
5342
5343 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5344 if (!cmd)
5345 goto unlock;
5346
5347 cmd->cmd_complete(cmd, mgmt_status(status));
5348 mgmt_pending_remove(cmd);
5349
5350 unlock:
5351 hci_dev_unlock(hdev);
5352 }
5353
/* Handle the Get Clock Information mgmt command.
 * Always reads the local clock; when a non-zero BR/EDR address is given
 * and connected, additionally reads that connection's piconet clock.
 * The reply is generated by clock_info_cmd_complete() once the HCI
 * request finishes.
 */
5354 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5355 u16 len)
5356 {
5357 struct mgmt_cp_get_clock_info *cp = data;
5358 struct mgmt_rp_get_clock_info rp;
5359 struct hci_cp_read_clock hci_cp;
5360 struct pending_cmd *cmd;
5361 struct hci_request req;
5362 struct hci_conn *conn;
5363 int err;
5364
5365 BT_DBG("%s", hdev->name);
5366
/* Error replies below echo the requested address back. */
5367 memset(&rp, 0, sizeof(rp));
5368 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5369 rp.addr.type = cp->addr.type;
5370
5371 if (cp->addr.type != BDADDR_BREDR)
5372 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5373 MGMT_STATUS_INVALID_PARAMS,
5374 &rp, sizeof(rp));
5375
5376 hci_dev_lock(hdev);
5377
5378 if (!hdev_is_powered(hdev)) {
5379 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5380 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5381 goto unlock;
5382 }
5383
/* BDADDR_ANY means "local clock only"; otherwise the named device
 * must currently be connected.
 */
5384 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5385 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5386 &cp->addr.bdaddr);
5387 if (!conn || conn->state != BT_CONNECTED) {
5388 err = cmd_complete(sk, hdev->id,
5389 MGMT_OP_GET_CLOCK_INFO,
5390 MGMT_STATUS_NOT_CONNECTED,
5391 &rp, sizeof(rp));
5392 goto unlock;
5393 }
5394 } else {
5395 conn = NULL;
5396 }
5397
5398 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5399 if (!cmd) {
5400 err = -ENOMEM;
5401 goto unlock;
5402 }
5403
5404 cmd->cmd_complete = clock_info_cmd_complete;
5405
5406 hci_req_init(&req, hdev);
5407
/* First Read Clock: which = 0x00 (local clock), handle ignored. */
5408 memset(&hci_cp, 0, sizeof(hci_cp));
5409 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5410
/* Second Read Clock for the piconet clock of the connection; the
 * references taken here are dropped in clock_info_cmd_complete().
 */
5411 if (conn) {
5412 hci_conn_hold(conn);
5413 cmd->user_data = hci_conn_get(conn);
5414
5415 hci_cp.handle = cpu_to_le16(conn->handle);
5416 hci_cp.which = 0x01; /* Piconet clock */
5417 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5418 }
5419
5420 err = hci_req_run(&req, get_clock_info_complete);
5421 if (err < 0)
5422 mgmt_pending_remove(cmd);
5423
5424 unlock:
5425 hci_dev_unlock(hdev);
5426 return err;
5427 }
5428
5429 static void device_added(struct sock *sk, struct hci_dev *hdev,
5430 bdaddr_t *bdaddr, u8 type, u8 action)
5431 {
5432 struct mgmt_ev_device_added ev;
5433
5434 bacpy(&ev.addr.bdaddr, bdaddr);
5435 ev.addr.type = type;
5436 ev.action = action;
5437
5438 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5439 }
5440
/* Handle the Add Device mgmt command.
 * cp->action: 0x00 = background scan/report, 0x01 = allow incoming
 * connection (BR/EDR whitelist) or direct connect (LE), 0x02 = LE
 * auto-connect always. BR/EDR entries go on the whitelist; LE entries
 * become connection parameters with the matching auto_conn policy.
 */
5441 static int add_device(struct sock *sk, struct hci_dev *hdev,
5442 void *data, u16 len)
5443 {
5444 struct mgmt_cp_add_device *cp = data;
5445 u8 auto_conn, addr_type;
5446 int err;
5447
5448 BT_DBG("%s", hdev->name);
5449
5450 if (!bdaddr_type_is_valid(cp->addr.type) ||
5451 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5452 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5453 MGMT_STATUS_INVALID_PARAMS,
5454 &cp->addr, sizeof(cp->addr));
5455
5456 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5457 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5458 MGMT_STATUS_INVALID_PARAMS,
5459 &cp->addr, sizeof(cp->addr));
5460
5461 hci_dev_lock(hdev);
5462
5463 if (cp->addr.type == BDADDR_BREDR) {
5464 /* Only incoming connections action is supported for now */
5465 if (cp->action != 0x01) {
5466 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5467 MGMT_STATUS_INVALID_PARAMS,
5468 &cp->addr, sizeof(cp->addr));
5469 goto unlock;
5470 }
5471
5472 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5473 cp->addr.type);
5474 if (err)
5475 goto unlock;
5476
/* Whitelist changed; page scan may need to be (re)enabled. */
5477 hci_update_page_scan(hdev);
5478
5479 goto added;
5480 }
5481
5482 if (cp->addr.type == BDADDR_LE_PUBLIC)
5483 addr_type = ADDR_LE_DEV_PUBLIC;
5484 else
5485 addr_type = ADDR_LE_DEV_RANDOM;
5486
5487 if (cp->action == 0x02)
5488 auto_conn = HCI_AUTO_CONN_ALWAYS;
5489 else if (cp->action == 0x01)
5490 auto_conn = HCI_AUTO_CONN_DIRECT;
5491 else
5492 auto_conn = HCI_AUTO_CONN_REPORT;
5493
5494 /* If the connection parameters don't exist for this device,
5495 * they will be created and configured with defaults.
5496 */
5497 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5498 auto_conn) < 0) {
5499 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5500 MGMT_STATUS_FAILED,
5501 &cp->addr, sizeof(cp->addr));
5502 goto unlock;
5503 }
5504
5505 added:
5506 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5507
5508 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5509 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5510
5511 unlock:
5512 hci_dev_unlock(hdev);
5513 return err;
5514 }
5515
5516 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5517 bdaddr_t *bdaddr, u8 type)
5518 {
5519 struct mgmt_ev_device_removed ev;
5520
5521 bacpy(&ev.addr.bdaddr, bdaddr);
5522 ev.addr.type = type;
5523
5524 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5525 }
5526
/* Handle the Remove Device mgmt command.
 * A specific address removes that device (BR/EDR whitelist entry or LE
 * connection parameters); BDADDR_ANY with address type 0 removes all
 * whitelist entries and all non-disabled LE connection parameters.
 */
5527 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5528 void *data, u16 len)
5529 {
5530 struct mgmt_cp_remove_device *cp = data;
5531 int err;
5532
5533 BT_DBG("%s", hdev->name);
5534
5535 hci_dev_lock(hdev);
5536
/* Non-ANY address: remove exactly one device. */
5537 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5538 struct hci_conn_params *params;
5539 u8 addr_type;
5540
5541 if (!bdaddr_type_is_valid(cp->addr.type)) {
5542 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5543 MGMT_STATUS_INVALID_PARAMS,
5544 &cp->addr, sizeof(cp->addr));
5545 goto unlock;
5546 }
5547
5548 if (cp->addr.type == BDADDR_BREDR) {
5549 err = hci_bdaddr_list_del(&hdev->whitelist,
5550 &cp->addr.bdaddr,
5551 cp->addr.type);
5552 if (err) {
5553 err = cmd_complete(sk, hdev->id,
5554 MGMT_OP_REMOVE_DEVICE,
5555 MGMT_STATUS_INVALID_PARAMS,
5556 &cp->addr, sizeof(cp->addr));
5557 goto unlock;
5558 }
5559
/* Whitelist shrank; page scan may need updating. */
5560 hci_update_page_scan(hdev);
5561
5562 device_removed(sk, hdev, &cp->addr.bdaddr,
5563 cp->addr.type);
5564 goto complete;
5565 }
5566
5567 if (cp->addr.type == BDADDR_LE_PUBLIC)
5568 addr_type = ADDR_LE_DEV_PUBLIC;
5569 else
5570 addr_type = ADDR_LE_DEV_RANDOM;
5571
5572 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5573 addr_type);
5574 if (!params) {
5575 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5576 MGMT_STATUS_INVALID_PARAMS,
5577 &cp->addr, sizeof(cp->addr));
5578 goto unlock;
5579 }
5580
/* Disabled entries were not added via Add Device, so they cannot be
 * removed through this command either.
 */
5581 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5582 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5583 MGMT_STATUS_INVALID_PARAMS,
5584 &cp->addr, sizeof(cp->addr));
5585 goto unlock;
5586 }
5587
5588 list_del(&params->action);
5589 list_del(&params->list);
5590 kfree(params);
5591 hci_update_background_scan(hdev);
5592
5593 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5594 } else {
/* BDADDR_ANY: wipe everything. Address type must be 0 here. */
5595 struct hci_conn_params *p, *tmp;
5596 struct bdaddr_list *b, *btmp;
5597
5598 if (cp->addr.type) {
5599 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5600 MGMT_STATUS_INVALID_PARAMS,
5601 &cp->addr, sizeof(cp->addr));
5602 goto unlock;
5603 }
5604
5605 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5606 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5607 list_del(&b->list);
5608 kfree(b);
5609 }
5610
5611 hci_update_page_scan(hdev);
5612
5613 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5614 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5615 continue;
5616 device_removed(sk, hdev, &p->addr, p->addr_type);
5617 list_del(&p->action);
5618 list_del(&p->list);
5619 kfree(p);
5620 }
5621
5622 BT_DBG("All LE connection parameters were removed");
5623
5624 hci_update_background_scan(hdev);
5625 }
5626
5627 complete:
5628 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5629 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5630
5631 unlock:
5632 hci_dev_unlock(hdev);
5633 return err;
5634 }
5635
/* Handle the Load Connection Parameters mgmt command.
 * Clears all disabled stored parameters, then adds/updates one entry per
 * valid element of the request. Individual invalid entries are logged
 * and skipped rather than failing the whole command.
 */
5636 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5637 u16 len)
5638 {
5639 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeping sizeof(*cp) + count * entry_size within U16_MAX
 * so the u16 expected_len computation below cannot overflow.
 */
5640 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5641 sizeof(struct mgmt_conn_param));
5642 u16 param_count, expected_len;
5643 int i;
5644
5645 if (!lmp_le_capable(hdev))
5646 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5647 MGMT_STATUS_NOT_SUPPORTED);
5648
5649 param_count = __le16_to_cpu(cp->param_count);
5650 if (param_count > max_param_count) {
5651 BT_ERR("load_conn_param: too big param_count value %u",
5652 param_count);
5653 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5654 MGMT_STATUS_INVALID_PARAMS);
5655 }
5656
5657 expected_len = sizeof(*cp) + param_count *
5658 sizeof(struct mgmt_conn_param);
5659 if (expected_len != len) {
5660 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5661 expected_len, len);
5662 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5663 MGMT_STATUS_INVALID_PARAMS);
5664 }
5665
5666 BT_DBG("%s param_count %u", hdev->name, param_count);
5667
5668 hci_dev_lock(hdev);
5669
5670 hci_conn_params_clear_disabled(hdev);
5671
5672 for (i = 0; i < param_count; i++) {
5673 struct mgmt_conn_param *param = &cp->params[i];
5674 struct hci_conn_params *hci_param;
5675 u16 min, max, latency, timeout;
5676 u8 addr_type;
5677
5678 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5679 param->addr.type);
5680
/* Only LE address types are meaningful for connection parameters. */
5681 if (param->addr.type == BDADDR_LE_PUBLIC) {
5682 addr_type = ADDR_LE_DEV_PUBLIC;
5683 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5684 addr_type = ADDR_LE_DEV_RANDOM;
5685 } else {
5686 BT_ERR("Ignoring invalid connection parameters");
5687 continue;
5688 }
5689
5690 min = le16_to_cpu(param->min_interval);
5691 max = le16_to_cpu(param->max_interval);
5692 latency = le16_to_cpu(param->latency);
5693 timeout = le16_to_cpu(param->timeout);
5694
5695 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5696 min, max, latency, timeout);
5697
5698 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5699 BT_ERR("Ignoring invalid connection parameters");
5700 continue;
5701 }
5702
/* hci_conn_params_add() returns the existing entry when one is
 * already stored for this address, so this updates in place.
 */
5703 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5704 addr_type);
5705 if (!hci_param) {
5706 BT_ERR("Failed to add connection parameters");
5707 continue;
5708 }
5709
5710 hci_param->conn_min_interval = min;
5711 hci_param->conn_max_interval = max;
5712 hci_param->conn_latency = latency;
5713 hci_param->supervision_timeout = timeout;
5714 }
5715
5716 hci_dev_unlock(hdev);
5717
5718 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5719 }
5720
/* Handle the Set External Configuration mgmt command.
 * Toggles HCI_EXT_CONFIGURED on controllers with the external-config
 * quirk. When this toggle changes whether the controller counts as
 * configured, the device migrates between the configured and
 * unconfigured index lists (with a power-on cycle when it becomes
 * configured). Rejected while powered.
 */
5721 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5722 void *data, u16 len)
5723 {
5724 struct mgmt_cp_set_external_config *cp = data;
5725 bool changed;
5726 int err;
5727
5728 BT_DBG("%s", hdev->name);
5729
5730 if (hdev_is_powered(hdev))
5731 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5732 MGMT_STATUS_REJECTED);
5733
5734 if (cp->config != 0x00 && cp->config != 0x01)
5735 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5736 MGMT_STATUS_INVALID_PARAMS);
5737
5738 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5739 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5740 MGMT_STATUS_NOT_SUPPORTED);
5741
5742 hci_dev_lock(hdev);
5743
5744 if (cp->config)
5745 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5746 &hdev->dev_flags);
5747 else
5748 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5749 &hdev->dev_flags);
5750
5751 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5752 if (err < 0)
5753 goto unlock;
5754
5755 if (!changed)
5756 goto unlock;
5757
5758 err = new_options(hdev, sk);
5759
/* The flag currently disagrees with the configured state, meaning the
 * toggle just moved the controller across the configured/unconfigured
 * boundary: remove it from its current index list, flip the
 * HCI_UNCONFIGURED flag, and re-register it on the other side (newly
 * configured controllers get a power-on cycle via the workqueue).
 */
5760 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5761 mgmt_index_removed(hdev);
5762
5763 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5764 set_bit(HCI_CONFIG, &hdev->dev_flags);
5765 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5766
5767 queue_work(hdev->req_workqueue, &hdev->power_on);
5768 } else {
5769 set_bit(HCI_RAW, &hdev->flags);
5770 mgmt_index_added(hdev);
5771 }
5772 }
5773
5774 unlock:
5775 hci_dev_unlock(hdev);
5776 return err;
5777 }
5778
/* Handle the Set Public Address mgmt command.
 * Stores the requested public address for a controller that provides a
 * set_bdaddr driver callback. Only allowed while powered off; if the
 * new address makes the controller fully configured, it is moved to the
 * configured index list and powered on via the workqueue.
 */
5779 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5780 void *data, u16 len)
5781 {
5782 struct mgmt_cp_set_public_address *cp = data;
5783 bool changed;
5784 int err;
5785
5786 BT_DBG("%s", hdev->name);
5787
5788 if (hdev_is_powered(hdev))
5789 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5790 MGMT_STATUS_REJECTED);
5791
5792 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5793 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5794 MGMT_STATUS_INVALID_PARAMS);
5795
5796 if (!hdev->set_bdaddr)
5797 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5798 MGMT_STATUS_NOT_SUPPORTED);
5799
5800 hci_dev_lock(hdev);
5801
5802 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5803 bacpy(&hdev->public_addr, &cp->bdaddr);
5804
5805 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5806 if (err < 0)
5807 goto unlock;
5808
5809 if (!changed)
5810 goto unlock;
5811
5812 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5813 err = new_options(hdev, sk);
5814
/* Having a public address may have been the last missing piece of
 * configuration: move the controller to the configured list and kick
 * off a power-on cycle.
 */
5815 if (is_configured(hdev)) {
5816 mgmt_index_removed(hdev);
5817
5818 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5819
5820 set_bit(HCI_CONFIG, &hdev->dev_flags);
5821 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5822
5823 queue_work(hdev->req_workqueue, &hdev->power_on);
5824 }
5825
5826 unlock:
5827 hci_dev_unlock(hdev);
5828 return err;
5829 }
5830
/* Dispatch table for mgmt commands, indexed by opcode (entry N handles
 * opcode 0x000N). var_len == true means data_len is a minimum size and
 * the command carries a variable-length payload; otherwise the payload
 * must be exactly data_len bytes.
 */
5831 static const struct mgmt_handler {
5832 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5833 u16 data_len);
5834 bool var_len;
5835 size_t data_len;
5836 } mgmt_handlers[] = {
5837 { NULL }, /* 0x0000 (no command) */
5838 { read_version, false, MGMT_READ_VERSION_SIZE },
5839 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5840 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5841 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5842 { set_powered, false, MGMT_SETTING_SIZE },
5843 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5844 { set_connectable, false, MGMT_SETTING_SIZE },
5845 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5846 { set_bondable, false, MGMT_SETTING_SIZE },
5847 { set_link_security, false, MGMT_SETTING_SIZE },
5848 { set_ssp, false, MGMT_SETTING_SIZE },
5849 { set_hs, false, MGMT_SETTING_SIZE },
5850 { set_le, false, MGMT_SETTING_SIZE },
5851 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5852 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5853 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5854 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5855 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5856 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5857 { disconnect, false, MGMT_DISCONNECT_SIZE },
5858 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5859 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5860 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5861 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5862 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5863 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5864 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5865 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5866 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5867 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5868 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5869 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5870 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5871 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5872 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5873 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5874 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5875 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5876 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5877 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5878 { set_advertising, false, MGMT_SETTING_SIZE },
5879 { set_bredr, false, MGMT_SETTING_SIZE },
5880 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5881 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5882 { set_secure_conn, false, MGMT_SETTING_SIZE },
5883 { set_debug_keys, false, MGMT_SETTING_SIZE },
5884 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5885 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5886 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5887 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5888 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5889 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5890 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5891 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5892 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5893 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5894 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5895 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
5896 };
5897
/* Entry point for raw management commands received on an HCI control
 * socket. Validates the frame header, resolves the target controller
 * index and dispatches to the matching entry in mgmt_handlers[].
 *
 * Returns the number of bytes consumed (msglen) on success or a
 * negative errno for transport-level failures; protocol-level errors
 * are reported to userspace via cmd_status() events instead.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	/* All header fields are little-endian on the wire. */
	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared parameter length must match exactly what
	 * followed the header.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed through
		 * a user channel, are not exposed over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the commands
		 * needed to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Index-less (global) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one. */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands declare a minimum parameter size,
	 * fixed-length commands an exact one.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6007
6008 void mgmt_index_added(struct hci_dev *hdev)
6009 {
6010 if (hdev->dev_type != HCI_BREDR)
6011 return;
6012
6013 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6014 return;
6015
6016 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6017 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6018 else
6019 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6020 }
6021
6022 void mgmt_index_removed(struct hci_dev *hdev)
6023 {
6024 u8 status = MGMT_STATUS_INVALID_INDEX;
6025
6026 if (hdev->dev_type != HCI_BREDR)
6027 return;
6028
6029 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6030 return;
6031
6032 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6033
6034 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6035 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6036 else
6037 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6038 }
6039
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Rebuild the pending LE connect/report action lists from each
	 * entry's auto_connect policy, then kick the background scan so
	 * the rebuilt lists take effect.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* No background action for this entry. */
			break;
		}
	}

	hci_update_background_scan(hdev);
}
6066
/* Completion callback for the HCI request queued by
 * powered_update_hci(). Restarts LE auto-connect actions, answers any
 * pending Set Powered commands and emits a New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference taken while responding to the
	 * pending command (match.sk is only set if one was found).
	 */
	if (match.sk)
		sock_put(match.sk);
}
6086
/* Build and run one HCI request that brings a freshly powered-on
 * controller in sync with the current mgmt settings (SSP, Secure
 * Connections, LE host support, advertising, link security, page scan,
 * class, name and EIR).
 *
 * Returns the result of hci_req_run(); powered_complete() is invoked
 * when the queued request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt has it enabled but the
	 * host feature is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	/* Same for BR/EDR Secure Connections support. */
	if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
		u8 sc = 0x01;
		hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication-enable setting with the
	 * mgmt link security flag.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	/* BR/EDR-only state: page scan, class of device, local name
	 * and extended inquiry response.
	 */
	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6151
/* Notify the management interface of a controller power state change.
 *
 * On power-on, first try to sync controller state via
 * powered_update_hci(); if that queued a request (returned 0), the
 * New Settings event is deferred to powered_complete(). On power-off,
 * fail all pending commands and signal a cleared class of device.
 *
 * Returns 0 or the result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	/* Nothing to do until some mgmt socket has claimed the device. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		/* No HCI commands were needed; answer pending Set
		 * Powered commands immediately.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change if it was actually non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6198
6199 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6200 {
6201 struct pending_cmd *cmd;
6202 u8 status;
6203
6204 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6205 if (!cmd)
6206 return;
6207
6208 if (err == -ERFKILL)
6209 status = MGMT_STATUS_RFKILLED;
6210 else
6211 status = MGMT_STATUS_FAILED;
6212
6213 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6214
6215 mgmt_pending_remove(cmd);
6216 }
6217
/* Timer handler invoked when the discoverable timeout expires: clear
 * the discoverable flags, restore plain page scan on BR/EDR, refresh
 * class and advertising data, and announce the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* SCAN_PAGE alone drops inquiry scan, ending BR/EDR
		 * discoverability while staying connectable.
		 */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6248
6249 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6250 bool persistent)
6251 {
6252 struct mgmt_ev_new_link_key ev;
6253
6254 memset(&ev, 0, sizeof(ev));
6255
6256 ev.store_hint = persistent;
6257 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6258 ev.key.addr.type = BDADDR_BREDR;
6259 ev.key.type = key->type;
6260 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6261 ev.key.pin_len = key->pin_len;
6262
6263 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6264 }
6265
6266 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6267 {
6268 switch (ltk->type) {
6269 case SMP_LTK:
6270 case SMP_LTK_SLAVE:
6271 if (ltk->authenticated)
6272 return MGMT_LTK_AUTHENTICATED;
6273 return MGMT_LTK_UNAUTHENTICATED;
6274 case SMP_LTK_P256:
6275 if (ltk->authenticated)
6276 return MGMT_LTK_P256_AUTH;
6277 return MGMT_LTK_P256_UNAUTH;
6278 case SMP_LTK_P256_DEBUG:
6279 return MGMT_LTK_P256_DEBUG;
6280 }
6281
6282 return MGMT_LTK_UNAUTHENTICATED;
6283 }
6284
/* Emit a New Long Term Key event so userspace can persist the key. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK denotes the master-role key; flag it as such. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6322
/* Emit a New IRK event carrying the identity resolving key and the
 * resolvable address it was received from.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6352
/* Emit a New CSRK event so userspace can persist the signature
 * resolving key when appropriate.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
6382
6383 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6384 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6385 u16 max_interval, u16 latency, u16 timeout)
6386 {
6387 struct mgmt_ev_new_conn_param ev;
6388
6389 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6390 return;
6391
6392 memset(&ev, 0, sizeof(ev));
6393 bacpy(&ev.addr.bdaddr, bdaddr);
6394 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6395 ev.store_hint = store_hint;
6396 ev.min_interval = cpu_to_le16(min_interval);
6397 ev.max_interval = cpu_to_le16(max_interval);
6398 ev.latency = cpu_to_le16(latency);
6399 ev.timeout = cpu_to_le16(timeout);
6400
6401 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6402 }
6403
6404 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6405 u8 data_len)
6406 {
6407 eir[eir_len++] = sizeof(type) + data_len;
6408 eir[eir_len++] = type;
6409 memcpy(&eir[eir_len], data, data_len);
6410 eir_len += data_len;
6411
6412 return eir_len;
6413 }
6414
/* Emit a Device Connected event, attaching EIR data: either a copy of
 * the remote's LE advertising data, or (for BR/EDR) the remote name
 * and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR share one buffer. */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include class of device when it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6451
6452 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6453 {
6454 struct sock **sk = data;
6455
6456 cmd->cmd_complete(cmd, 0);
6457
6458 *sk = cmd->sk;
6459 sock_hold(*sk);
6460
6461 mgmt_pending_remove(cmd);
6462 }
6463
6464 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6465 {
6466 struct hci_dev *hdev = data;
6467 struct mgmt_cp_unpair_device *cp = cmd->param;
6468
6469 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6470
6471 cmd->cmd_complete(cmd, 0);
6472 mgmt_pending_remove(cmd);
6473 }
6474
6475 bool mgmt_powering_down(struct hci_dev *hdev)
6476 {
6477 struct pending_cmd *cmd;
6478 struct mgmt_mode *cp;
6479
6480 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6481 if (!cmd)
6482 return false;
6483
6484 cp = cmd->param;
6485 if (!cp->val)
6486 return true;
6487
6488 return false;
6489 }
6490
/* Handle a dropped connection: expedite a pending power-off once the
 * last link goes down, then (for mgmt-visible ACL/LE links) emit a
 * Device Disconnected event and resolve any pending Disconnect and
 * Unpair Device commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if mgmt never saw this connection as connected. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back (and holds) the command's socket so
	 * the event can be skipped for that socket below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6526
/* Handle a failed HCI disconnect: resolve pending Unpair Device
 * commands and, if the failure matches the address of a pending
 * Disconnect command, complete that command with the mapped status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only touch the command if it targets this exact address. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
6552
6553 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6554 u8 addr_type, u8 status)
6555 {
6556 struct mgmt_ev_connect_failed ev;
6557
6558 /* The connection is still in hci_conn_hash so test for 1
6559 * instead of 0 to know if this is the last one.
6560 */
6561 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6562 cancel_delayed_work(&hdev->power_off);
6563 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6564 }
6565
6566 bacpy(&ev.addr.bdaddr, bdaddr);
6567 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6568 ev.status = mgmt_status(status);
6569
6570 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6571 }
6572
6573 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6574 {
6575 struct mgmt_ev_pin_code_request ev;
6576
6577 bacpy(&ev.addr.bdaddr, bdaddr);
6578 ev.addr.type = BDADDR_BREDR;
6579 ev.secure = secure;
6580
6581 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6582 }
6583
6584 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6585 u8 status)
6586 {
6587 struct pending_cmd *cmd;
6588
6589 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6590 if (!cmd)
6591 return;
6592
6593 cmd->cmd_complete(cmd, mgmt_status(status));
6594 mgmt_pending_remove(cmd);
6595 }
6596
6597 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6598 u8 status)
6599 {
6600 struct pending_cmd *cmd;
6601
6602 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6603 if (!cmd)
6604 return;
6605
6606 cmd->cmd_complete(cmd, mgmt_status(status));
6607 mgmt_pending_remove(cmd);
6608 }
6609
6610 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6611 u8 link_type, u8 addr_type, u32 value,
6612 u8 confirm_hint)
6613 {
6614 struct mgmt_ev_user_confirm_request ev;
6615
6616 BT_DBG("%s", hdev->name);
6617
6618 bacpy(&ev.addr.bdaddr, bdaddr);
6619 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6620 ev.confirm_hint = confirm_hint;
6621 ev.value = cpu_to_le32(value);
6622
6623 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6624 NULL);
6625 }
6626
6627 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6628 u8 link_type, u8 addr_type)
6629 {
6630 struct mgmt_ev_user_passkey_request ev;
6631
6632 BT_DBG("%s", hdev->name);
6633
6634 bacpy(&ev.addr.bdaddr, bdaddr);
6635 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6636
6637 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6638 NULL);
6639 }
6640
6641 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6642 u8 link_type, u8 addr_type, u8 status,
6643 u8 opcode)
6644 {
6645 struct pending_cmd *cmd;
6646
6647 cmd = mgmt_pending_find(opcode, hdev);
6648 if (!cmd)
6649 return -ENOENT;
6650
6651 cmd->cmd_complete(cmd, mgmt_status(status));
6652 mgmt_pending_remove(cmd);
6653
6654 return 0;
6655 }
6656
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6663
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6671
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6678
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6686
6687 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6688 u8 link_type, u8 addr_type, u32 passkey,
6689 u8 entered)
6690 {
6691 struct mgmt_ev_passkey_notify ev;
6692
6693 BT_DBG("%s", hdev->name);
6694
6695 bacpy(&ev.addr.bdaddr, bdaddr);
6696 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6697 ev.passkey = __cpu_to_le32(passkey);
6698 ev.entered = entered;
6699
6700 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6701 }
6702
/* Report an authentication failure for a connection: emit an Auth
 * Failed event (skipping the socket of a matching pending pairing
 * command) and complete that pairing command if one exists.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event for the command's own socket; it gets the
	 * command response instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
6723
/* Completion handler for the authentication-enable write triggered by
 * Set Link Security: sync the HCI_LINK_SECURITY flag with the
 * controller's HCI_AUTH state and answer pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands. */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Track whether the flag actually flipped so New Settings is
	 * only emitted on a real change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6752
6753 static void clear_eir(struct hci_request *req)
6754 {
6755 struct hci_dev *hdev = req->hdev;
6756 struct hci_cp_write_eir cp;
6757
6758 if (!lmp_ext_inq_capable(hdev))
6759 return;
6760
6761 memset(hdev->eir, 0, sizeof(hdev->eir));
6762
6763 memset(&cp, 0, sizeof(cp));
6764
6765 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6766 }
6767
/* Completion handler for the SSP mode write: reconcile the
 * HCI_SSP_ENABLED / HCI_HS_ENABLED flags with the result, answer
 * pending Set SSP commands and refresh (or clear) the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* An enable that failed after the flag was already set
		 * must roll the flag back and announce the rollback.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; report a change if
		 * either flag actually flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6820
/* Completion handler for the Secure Connections support write:
 * reconcile the HCI_SC_ENABLED / HCI_SC_ONLY flags with the result
 * and answer pending Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back a failed enable and announce it if the
		 * flag had already been set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot survive SC being disabled. */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6857
6858 static void sk_lookup(struct pending_cmd *cmd, void *data)
6859 {
6860 struct cmd_lookup *match = data;
6861
6862 if (match->sk == NULL) {
6863 match->sk = cmd->sk;
6864 sock_hold(match->sk);
6865 }
6866 }
6867
6868 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6869 u8 status)
6870 {
6871 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6872
6873 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6874 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6875 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6876
6877 if (!status)
6878 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6879 NULL);
6880
6881 if (match.sk)
6882 sock_put(match.sk);
6883 }
6884
/* Completion handler for a local name write: update the cached name
 * and emit a Local Name Changed event, unless the write was part of a
 * power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the write originated in the
		 * kernel; adopt the name into the cache directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the event for the socket that issued the command. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6911
/* Completion handler for Read Local OOB Data: reply to the pending
 * command with either the extended (P-192 + P-256) or the legacy
 * (P-192 only) response format, depending on Secure Connections
 * support and the availability of the P-256 values.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			/* Extended reply carrying both key variants. */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply with the P-192 values only. */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6954
6955 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6956 {
6957 int i;
6958
6959 for (i = 0; i < uuid_count; i++) {
6960 if (!memcmp(uuid, uuids[i], 16))
6961 return true;
6962 }
6963
6964 return false;
6965 }
6966
/* Walk EIR/advertising data and return true if any 16-, 32- or
 * 128-bit Service UUID field contains a UUID from the filter list.
 * Shorter UUIDs are expanded to 128 bits using the Bluetooth base
 * UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0]; /* counts type byte + payload */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a field that claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Payload starts at eir[2]; each 16-bit UUID is
			 * little-endian and maps onto bytes 12-13 of
			 * the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs map onto bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7021
/* Send a MGMT_EV_DEVICE_FOUND event for a device seen during BR/EDR
 * inquiry or LE scanning.
 *
 * When service discovery with a UUID filter is active
 * (hdev->discovery.uuid_count > 0), the result is reported only if
 * the EIR/advertising data or the scan response data contains at
 * least one matching UUID. An optional RSSI threshold from
 * hdev->discovery.rssi is applied first. If @dev_class is given and
 * no Class of Device field is present yet, it is appended to the
 * event's EIR payload.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, then all results with a RSSI smaller than the
	 * RSSI threshold will be dropped.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0)
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
		else
			match = true;

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		/* No advertising data: any match must come from the
		 * scan response data checked below.
		 */
		match = false;
	}

	/* Append the Class of Device unless the EIR already carries one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7141
7142 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7143 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7144 {
7145 struct mgmt_ev_device_found *ev;
7146 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7147 u16 eir_len;
7148
7149 ev = (struct mgmt_ev_device_found *) buf;
7150
7151 memset(buf, 0, sizeof(buf));
7152
7153 bacpy(&ev->addr.bdaddr, bdaddr);
7154 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7155 ev->rssi = rssi;
7156
7157 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7158 name_len);
7159
7160 ev->eir_len = cpu_to_le16(eir_len);
7161
7162 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7163 }
7164
7165 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7166 {
7167 struct mgmt_ev_discovering ev;
7168
7169 BT_DBG("%s discovering %u", hdev->name, discovering);
7170
7171 memset(&ev, 0, sizeof(ev));
7172 ev.type = hdev->discovery.type;
7173 ev.discovering = discovering;
7174
7175 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7176 }
7177
/* Completion callback for the advertising re-enable request issued by
 * mgmt_reenable_advertising(); only logs the controller status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7182
7183 void mgmt_reenable_advertising(struct hci_dev *hdev)
7184 {
7185 struct hci_request req;
7186
7187 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7188 return;
7189
7190 hci_req_init(&req, hdev);
7191 enable_advertising(&req);
7192 hci_req_run(&req, adv_enable_complete);
7193 }