Bluetooth: Limit BR/EDR switching for LE only with secure connections
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "hci_request.h"
36 #include "smp.h"
37
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
40
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_READ_INFO,
44 MGMT_OP_SET_POWERED,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_BONDABLE,
49 MGMT_OP_SET_LINK_SECURITY,
50 MGMT_OP_SET_SSP,
51 MGMT_OP_SET_HS,
52 MGMT_OP_SET_LE,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_ADD_UUID,
56 MGMT_OP_REMOVE_UUID,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_DISCONNECT,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_PAIR_DEVICE,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_CONFIRM_NAME,
77 MGMT_OP_BLOCK_DEVICE,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_BREDR,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
86 MGMT_OP_SET_PRIVACY,
87 MGMT_OP_LOAD_IRKS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_ADD_DEVICE,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
98 };
99
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
102 MGMT_EV_INDEX_ADDED,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
115 MGMT_EV_AUTH_FAILED,
116 MGMT_EV_DEVICE_FOUND,
117 MGMT_EV_DISCOVERING,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_NEW_IRK,
123 MGMT_EV_NEW_CSRK,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
130 };
131
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
133
134 struct pending_cmd {
135 struct list_head list;
136 u16 opcode;
137 int index;
138 void *param;
139 size_t param_len;
140 struct sock *sk;
141 void *user_data;
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
143 };
144
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_SUCCESS,
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
208 };
209
210 static u8 mgmt_status(u8 hci_status)
211 {
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
214
215 return MGMT_STATUS_FAILED;
216 }
217
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
220 {
221 struct sk_buff *skb;
222 struct mgmt_hdr *hdr;
223
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
225 if (!skb)
226 return -ENOMEM;
227
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
230 if (hdev)
231 hdr->index = cpu_to_le16(hdev->id);
232 else
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
235
236 if (data)
237 memcpy(skb_put(skb, data_len), data, data_len);
238
239 /* Time stamp */
240 __net_timestamp(skb);
241
242 hci_send_to_control(skb, skip_sk);
243 kfree_skb(skb);
244
245 return 0;
246 }
247
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
249 {
250 struct sk_buff *skb;
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
253 int err;
254
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
258 if (!skb)
259 return -ENOMEM;
260
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
262
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
266
267 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->status = status;
269 ev->opcode = cpu_to_le16(cmd);
270
271 err = sock_queue_rcv_skb(sk, skb);
272 if (err < 0)
273 kfree_skb(skb);
274
275 return err;
276 }
277
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
280 {
281 struct sk_buff *skb;
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
284 int err;
285
286 BT_DBG("sock %p", sk);
287
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
289 if (!skb)
290 return -ENOMEM;
291
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
293
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
300 ev->status = status;
301
302 if (rp)
303 memcpy(ev->data, rp, rp_len);
304
305 err = sock_queue_rcv_skb(sk, skb);
306 if (err < 0)
307 kfree_skb(skb);
308
309 return err;
310 }
311
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
313 u16 data_len)
314 {
315 struct mgmt_rp_read_version rp;
316
317 BT_DBG("sock %p", sk);
318
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
321
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
323 sizeof(rp));
324 }
325
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
327 u16 data_len)
328 {
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
332 __le16 *opcode;
333 size_t rp_size;
334 int i, err;
335
336 BT_DBG("sock %p", sk);
337
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339
340 rp = kmalloc(rp_size, GFP_KERNEL);
341 if (!rp)
342 return -ENOMEM;
343
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
346
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode);
349
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
352
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
354 rp_size);
355 kfree(rp);
356
357 return err;
358 }
359
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
361 u16 data_len)
362 {
363 struct mgmt_rp_read_index_list *rp;
364 struct hci_dev *d;
365 size_t rp_len;
366 u16 count;
367 int err;
368
369 BT_DBG("sock %p", sk);
370
371 read_lock(&hci_dev_list_lock);
372
373 count = 0;
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
377 count++;
378 }
379
380 rp_len = sizeof(*rp) + (2 * count);
381 rp = kmalloc(rp_len, GFP_ATOMIC);
382 if (!rp) {
383 read_unlock(&hci_dev_list_lock);
384 return -ENOMEM;
385 }
386
387 count = 0;
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
392 continue;
393
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
396 */
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
398 continue;
399
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
404 }
405 }
406
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
409
410 read_unlock(&hci_dev_list_lock);
411
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
413 rp_len);
414
415 kfree(rp);
416
417 return err;
418 }
419
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
422 {
423 struct mgmt_rp_read_unconf_index_list *rp;
424 struct hci_dev *d;
425 size_t rp_len;
426 u16 count;
427 int err;
428
429 BT_DBG("sock %p", sk);
430
431 read_lock(&hci_dev_list_lock);
432
433 count = 0;
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
437 count++;
438 }
439
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC);
442 if (!rp) {
443 read_unlock(&hci_dev_list_lock);
444 return -ENOMEM;
445 }
446
447 count = 0;
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
452 continue;
453
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
456 */
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
458 continue;
459
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
464 }
465 }
466
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
469
470 read_unlock(&hci_dev_list_lock);
471
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
473 0, rp, rp_len);
474
475 kfree(rp);
476
477 return err;
478 }
479
480 static bool is_configured(struct hci_dev *hdev)
481 {
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
484 return false;
485
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
488 return false;
489
490 return true;
491 }
492
493 static __le32 get_missing_options(struct hci_dev *hdev)
494 {
495 u32 options = 0;
496
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504
505 return cpu_to_le32(options);
506 }
507
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 {
510 __le32 options = get_missing_options(hdev);
511
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
514 }
515
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 {
518 __le32 options = get_missing_options(hdev);
519
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
521 sizeof(options));
522 }
523
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
526 {
527 struct mgmt_rp_read_config_info rp;
528 u32 options = 0;
529
530 BT_DBG("sock %p %s", sk, hdev->name);
531
532 hci_dev_lock(hdev);
533
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
539
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
545
546 hci_dev_unlock(hdev);
547
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
549 sizeof(rp));
550 }
551
552 static u32 get_supported_settings(struct hci_dev *hdev)
553 {
554 u32 settings = 0;
555
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
561
562 if (lmp_bredr_capable(hdev)) {
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
567
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
571 }
572
573 if (lmp_sc_capable(hdev))
574 settings |= MGMT_SETTING_SECURE_CONN;
575 }
576
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
582 }
583
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 hdev->set_bdaddr)
586 settings |= MGMT_SETTING_CONFIGURATION;
587
588 return settings;
589 }
590
591 static u32 get_current_settings(struct hci_dev *hdev)
592 {
593 u32 settings = 0;
594
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
597
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
600
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
606
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
609
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
612
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
615
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
618
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
621
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
624
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
627
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
630
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
633
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
636
637 return settings;
638 }
639
640 #define PNP_INFO_SVCLASS_ID 0x1200
641
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 {
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
646
647 if (len < 4)
648 return ptr;
649
650 list_for_each_entry(uuid, &hdev->uuids, list) {
651 u16 uuid16;
652
653 if (uuid->size != 16)
654 continue;
655
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
657 if (uuid16 < 0x1100)
658 continue;
659
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
661 continue;
662
663 if (!uuids_start) {
664 uuids_start = ptr;
665 uuids_start[0] = 1;
666 uuids_start[1] = EIR_UUID16_ALL;
667 ptr += 2;
668 }
669
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
673 break;
674 }
675
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
679 }
680
681 return ptr;
682 }
683
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 {
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
688
689 if (len < 6)
690 return ptr;
691
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
694 continue;
695
696 if (!uuids_start) {
697 uuids_start = ptr;
698 uuids_start[0] = 1;
699 uuids_start[1] = EIR_UUID32_ALL;
700 ptr += 2;
701 }
702
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
706 break;
707 }
708
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 ptr += sizeof(u32);
711 uuids_start[0] += sizeof(u32);
712 }
713
714 return ptr;
715 }
716
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 {
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
721
722 if (len < 18)
723 return ptr;
724
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
727 continue;
728
729 if (!uuids_start) {
730 uuids_start = ptr;
731 uuids_start[0] = 1;
732 uuids_start[1] = EIR_UUID128_ALL;
733 ptr += 2;
734 }
735
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
739 break;
740 }
741
742 memcpy(ptr, uuid->uuid, 16);
743 ptr += 16;
744 uuids_start[0] += 16;
745 }
746
747 return ptr;
748 }
749
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 {
752 struct pending_cmd *cmd;
753
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
756 return cmd;
757 }
758
759 return NULL;
760 }
761
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
764 const void *data)
765 {
766 struct pending_cmd *cmd;
767
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
770 continue;
771 if (cmd->opcode == opcode)
772 return cmd;
773 }
774
775 return NULL;
776 }
777
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
779 {
780 u8 ad_len = 0;
781 size_t name_len;
782
783 name_len = strlen(hdev->dev_name);
784 if (name_len > 0) {
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786
787 if (name_len > max_len) {
788 name_len = max_len;
789 ptr[1] = EIR_NAME_SHORT;
790 } else
791 ptr[1] = EIR_NAME_COMPLETE;
792
793 ptr[0] = name_len + 1;
794
795 memcpy(ptr + 2, hdev->dev_name, name_len);
796
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
799 }
800
801 return ad_len;
802 }
803
804 static void update_scan_rsp_data(struct hci_request *req)
805 {
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
808 u8 len;
809
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
811 return;
812
813 memset(&cp, 0, sizeof(cp));
814
815 len = create_scan_rsp_data(hdev, cp.data);
816
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
819 return;
820
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
823
824 cp.length = len;
825
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
827 }
828
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 {
831 struct pending_cmd *cmd;
832
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
835 */
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 if (cmd) {
838 struct mgmt_mode *cp = cmd->param;
839 if (cp->val == 0x01)
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
843 } else {
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
848 }
849
850 return 0;
851 }
852
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 {
855 u8 ad_len = 0, flags = 0;
856
857 flags |= get_adv_discov_flags(hdev);
858
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
861
862 if (flags) {
863 BT_DBG("adv flags 0x%02x", flags);
864
865 ptr[0] = 2;
866 ptr[1] = EIR_FLAGS;
867 ptr[2] = flags;
868
869 ad_len += 3;
870 ptr += 3;
871 }
872
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[0] = 2;
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
877
878 ad_len += 3;
879 ptr += 3;
880 }
881
882 return ad_len;
883 }
884
885 static void update_adv_data(struct hci_request *req)
886 {
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
889 u8 len;
890
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
892 return;
893
894 memset(&cp, 0, sizeof(cp));
895
896 len = create_adv_data(hdev, cp.data);
897
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
900 return;
901
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
904
905 cp.length = len;
906
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
908 }
909
910 int mgmt_update_adv_data(struct hci_dev *hdev)
911 {
912 struct hci_request req;
913
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
916
917 return hci_req_run(&req, NULL);
918 }
919
920 static void create_eir(struct hci_dev *hdev, u8 *data)
921 {
922 u8 *ptr = data;
923 size_t name_len;
924
925 name_len = strlen(hdev->dev_name);
926
927 if (name_len > 0) {
928 /* EIR Data type */
929 if (name_len > 48) {
930 name_len = 48;
931 ptr[1] = EIR_NAME_SHORT;
932 } else
933 ptr[1] = EIR_NAME_COMPLETE;
934
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
937
938 memcpy(ptr + 2, hdev->dev_name, name_len);
939
940 ptr += (name_len + 2);
941 }
942
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
944 ptr[0] = 2;
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
947
948 ptr += 3;
949 }
950
951 if (hdev->devid_source > 0) {
952 ptr[0] = 9;
953 ptr[1] = EIR_DEVICE_ID;
954
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
959
960 ptr += 10;
961 }
962
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
966 }
967
968 static void update_eir(struct hci_request *req)
969 {
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
972
973 if (!hdev_is_powered(hdev))
974 return;
975
976 if (!lmp_ext_inq_capable(hdev))
977 return;
978
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
980 return;
981
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
983 return;
984
985 memset(&cp, 0, sizeof(cp));
986
987 create_eir(hdev, cp.data);
988
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
990 return;
991
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
993
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
995 }
996
997 static u8 get_service_classes(struct hci_dev *hdev)
998 {
999 struct bt_uuid *uuid;
1000 u8 val = 0;
1001
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
1004
1005 return val;
1006 }
1007
1008 static void update_class(struct hci_request *req)
1009 {
1010 struct hci_dev *hdev = req->hdev;
1011 u8 cod[3];
1012
1013 BT_DBG("%s", hdev->name);
1014
1015 if (!hdev_is_powered(hdev))
1016 return;
1017
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1019 return;
1020
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1022 return;
1023
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1027
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1029 cod[1] |= 0x20;
1030
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1032 return;
1033
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1035 }
1036
1037 static bool get_connectable(struct hci_dev *hdev)
1038 {
1039 struct pending_cmd *cmd;
1040
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1043 */
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1045 if (cmd) {
1046 struct mgmt_mode *cp = cmd->param;
1047 return cp->val;
1048 }
1049
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1051 }
1052
1053 static void disable_advertising(struct hci_request *req)
1054 {
1055 u8 enable = 0x00;
1056
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1058 }
1059
1060 static void enable_advertising(struct hci_request *req)
1061 {
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1065 bool connectable;
1066
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1068 return;
1069
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1072
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1077 */
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1079
1080 connectable = get_connectable(hdev);
1081
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1085 */
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1087 return;
1088
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1095
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1097
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1099 }
1100
1101 static void service_cache_off(struct work_struct *work)
1102 {
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1106
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1108 return;
1109
1110 hci_req_init(&req, hdev);
1111
1112 hci_dev_lock(hdev);
1113
1114 update_eir(&req);
1115 update_class(&req);
1116
1117 hci_dev_unlock(hdev);
1118
1119 hci_req_run(&req, NULL);
1120 }
1121
1122 static void rpa_expired(struct work_struct *work)
1123 {
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1125 rpa_expired.work);
1126 struct hci_request req;
1127
1128 BT_DBG("");
1129
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1131
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1133 return;
1134
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1137 */
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
1141 }
1142
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1144 {
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1146 return;
1147
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1150
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1154 * it
1155 */
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1157 }
1158
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, supported and current settings, class of
 * device and names. All fields are copied under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1188
/* Release a pending mgmt command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the entry itself.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1195
/* Allocate a pending mgmt command and queue it on hdev->mgmt_pending.
 *
 * A private copy of the command parameters is made and a reference to
 * the originating socket is held until the entry is freed via
 * mgmt_pending_free(). Returns the new entry, or NULL on allocation
 * failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	/* Keep a private copy since the caller's buffer may go away */
	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	cmd->param_len = len;

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1224
/* Invoke @cb for every pending mgmt command on @hdev whose opcode
 * matches @opcode; an @opcode of 0 matches all pending commands. The
 * safe iterator allows @cb to remove the entry it is handed.
 */
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}
1239
/* Unlink a pending command from hdev->mgmt_pending and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1245
/* Send a successful command-complete reply for @opcode carrying the
 * controller's current settings bitmask as response data.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1253
/* Request-complete callback for clean_up_hci_state(): once no
 * connections remain, cancel the delayed power off and schedule it to
 * run immediately instead of waiting for the timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1263
/* Queue onto @req the HCI commands needed to stop whatever discovery
 * activity is in progress: an active inquiry or LE scan while finding,
 * a pending remote name request while resolving, or a passive LE scan
 * otherwise.
 *
 * Returns true if at least one stop command was queued.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: also stop the pending auto-disable */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No outstanding name request left to cancel */
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305
/* Bring the controller towards a quiescent state before power off:
 * disable page/inquiry scan and advertising, stop discovery, and tear
 * down every connection according to its state (disconnect established
 * links, cancel outgoing connection attempts, reject incoming ones).
 *
 * Returns the hci_req_run() result; -ENODATA indicates no HCI commands
 * were needed at all.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt not yet established: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming request awaiting our reply: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * Powering down first runs clean_up_hci_state() so that scanning,
 * advertising and connections are torn down gracefully before the
 * actual power off is scheduled.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* During the auto-off grace period the controller is still up,
	 * so a power-on request only needs to cancel the pending auto
	 * off and report the device as powered.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches reality */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1430
/* Broadcast a New Settings event carrying the current settings bitmask
 * to all mgmt sockets except @skip (which may be NULL).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
1439
/* Public entry point: broadcast the current settings to every mgmt
 * socket (no socket skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444
/* Context passed to mgmt_pending_foreach() callbacks such as
 * settings_rsp(): @sk collects the first responder's socket (with a
 * held reference) so follow-up events can skip it; @mgmt_status carries
 * a status code for callbacks that need one.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1450
/* mgmt_pending_foreach() callback: reply to a pending command with the
 * current settings and free it. The first socket seen is stashed (with
 * an extra reference) in the cmd_lookup so the caller can skip it when
 * broadcasting New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1466
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data (a u8 pointer) and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1474
/* mgmt_pending_foreach() callback: finish a pending command using its
 * own cmd_complete handler when one is set, otherwise fall back to a
 * plain command-status reply via cmd_status_rsp().
 */
static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1488
/* Default cmd_complete handler: echo back the full original command
 * parameters as the reply payload.
 */
static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}
1494
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}
1500
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1502 {
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1507 else
1508 return MGMT_STATUS_SUCCESS;
1509 }
1510
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1512 {
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1517 else
1518 return MGMT_STATUS_SUCCESS;
1519 }
1520
/* Request-complete callback for MGMT_OP_SET_DISCOVERABLE: sync the
 * HCI_DISCOVERABLE flag with the outcome, arm the discoverable timeout
 * if one was requested, reply to the caller, emit New Settings on an
 * actual change and refresh page scan and class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the limited-mode flag set in set_discoverable() */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout stored earlier by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1580
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which requires
 * a timeout). For BR/EDR the IAC LAPs and inquiry scan are updated; for
 * LE-only operation just the advertising data. Arming the discoverable
 * timeout happens in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable needs at least one transport enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only valid while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1745
/* Queue page scan parameter updates for fast connectable mode. When
 * enabled, interlaced scanning with a 160 msec interval is used;
 * otherwise the standard type with the default 1.28 sec interval.
 * Commands are only queued when the values differ from the ones the
 * controller currently has.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require at least 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1780
/* Request-complete callback for MGMT_OP_SET_CONNECTABLE: sync the
 * HCI_CONNECTABLE flag (and clear HCI_DISCOVERABLE when disabling) with
 * the outcome, reply to the caller, and on a change refresh page scan,
 * advertising data and background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		/* Non-connectable implies non-discoverable */
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1830
/* Update the connectable setting when no HCI commands are needed (e.g.
 * the controller is powered off, or nothing was actually queued):
 * adjust the flags, reply to the caller and emit New Settings on a
 * change. Turning connectable off also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1859
/* Handler for MGMT_OP_SET_CONNECTABLE: control whether the controller
 * accepts incoming connections - page scan for BR/EDR, and the
 * advertising flags/parameters for LE.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: no HCI commands needed, just flag bookkeeping */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing was queued, fall back to pure flag
		 * bookkeeping as in the powered-off case.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1964
1965 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1966 u16 len)
1967 {
1968 struct mgmt_mode *cp = data;
1969 bool changed;
1970 int err;
1971
1972 BT_DBG("request for %s", hdev->name);
1973
1974 if (cp->val != 0x00 && cp->val != 0x01)
1975 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1976 MGMT_STATUS_INVALID_PARAMS);
1977
1978 hci_dev_lock(hdev);
1979
1980 if (cp->val)
1981 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1982 else
1983 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1984
1985 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1986 if (err < 0)
1987 goto unlock;
1988
1989 if (changed)
1990 err = new_settings(hdev, sk);
1991
1992 unlock:
1993 hci_dev_unlock(hdev);
1994 return err;
1995 }
1996
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle legacy link-level
 * authentication via HCI_OP_WRITE_AUTH_ENABLE. When the controller is
 * powered off only the HCI_LINK_SECURITY flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already has the requested auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2066
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing
 * via HCI_OP_WRITE_SSP_MODE. When powered off only the flags change;
 * disabling SSP also clears the High Speed flag (set_hs() requires SSP
 * to be enabled).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS must go down with SSP; report a change if
			 * either flag was actually cleared.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2148
/* Handler for MGMT_OP_SET_HS: toggle High Speed support. This is a
 * host-side flag only, so no HCI commands are sent. Requires SSP to be
 * enabled; disabling HS is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2199
/* Request-complete callback for MGMT_OP_SET_LE. On failure all pending
 * SET_LE commands are failed with the translated status; on success
 * they are completed with the current settings, New Settings is emitted
 * and - if LE ended up enabled - the advertising data, scan response
 * data and background scan are refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2239
/* Handler for MGMT_OP_SET_LE: toggle LE host support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED. LE-only controllers (BR/EDR not
 * enabled) reject the command. When powered off, or when the host
 * setting already matches, only the flags are updated; disabling LE
 * also stops any active advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed: powered off, or controller's host LE
	 * setting already matches the requested value.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning LE off */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2328
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2334 */
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2336 {
2337 struct pending_cmd *cmd;
2338
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
2345 return true;
2346 }
2347 }
2348
2349 return false;
2350 }
2351
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. Shortened 16/32-bit UUIDs place their value
 * in the final four bytes on top of this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2356
2357 static u8 get_uuid_size(const u8 *uuid)
2358 {
2359 u32 val;
2360
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2362 return 128;
2363
2364 val = get_unaligned_le32(&uuid[12]);
2365 if (val > 0xffff)
2366 return 32;
2367
2368 return 16;
2369 }
2370
2371 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2372 {
2373 struct pending_cmd *cmd;
2374
2375 hci_dev_lock(hdev);
2376
2377 cmd = mgmt_pending_find(mgmt_op, hdev);
2378 if (!cmd)
2379 goto unlock;
2380
2381 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2382 hdev->dev_class, 3);
2383
2384 mgmt_pending_remove(cmd);
2385
2386 unlock:
2387 hci_dev_unlock(hdev);
2388 }
2389
/* HCI request completion callback for Add UUID: finish the pending mgmt
 * command with the resulting status.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2396
/* Handle the MGMT_OP_ADD_UUID command: record a new service UUID and
 * refresh the Class of Device and EIR data on the controller. The reply
 * is deferred to add_uuid_complete() unless no HCI commands were needed.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means the request queued no HCI commands (nothing
		 * changed on the controller), so reply immediately instead of
		 * waiting for a completion callback.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* HCI commands are in flight; the reply is sent from the request
	 * completion via mgmt_class_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2454
2455 static bool enable_service_cache(struct hci_dev *hdev)
2456 {
2457 if (!hdev_is_powered(hdev))
2458 return false;
2459
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2462 CACHE_TIMEOUT);
2463 return true;
2464 }
2465
2466 return false;
2467 }
2468
/* HCI request completion callback for Remove UUID: finish the pending
 * mgmt command with the resulting status.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2475
/* Handle the MGMT_OP_REMOVE_UUID command: remove one UUID (or, for the
 * all-zero wildcard address, all UUIDs) and refresh CoD/EIR. The reply is
 * deferred to remove_uuid_complete() unless no HCI commands were needed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* An all-zero UUID means "remove everything" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was newly armed, the actual
		 * CoD/EIR update is deferred; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands were queued, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply will come from remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2553
/* HCI request completion callback for Set Device Class: finish the
 * pending mgmt command with the resulting status.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2560
/* Handle the MGMT_OP_SET_DEV_CLASS command: update the major/minor device
 * class and push it (plus possibly refreshed EIR data) to the controller.
 * BR/EDR-only; the reply carries the 3-byte current device class.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just store the values, they will be applied on
	 * power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while cancelling the service-cache work.
		 * NOTE(review): presumably the work item itself takes
		 * hci_dev_lock, so waiting with the lock held would
		 * deadlock — confirm against the work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: no HCI commands were queued, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply will come from set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2631
/* Handle the MGMT_OP_LOAD_LINK_KEYS command: replace the stored BR/EDR
 * link keys with the supplied list and update the keep-debug-keys policy.
 * All parameters are validated before any state is modified.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2713
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2716 {
2717 struct mgmt_ev_device_unpaired ev;
2718
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2721
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2723 skip_sk);
2724 }
2725
/* Handle the MGMT_OP_UNPAIR_DEVICE command: remove stored keys (link key
 * for BR/EDR; IRK and LTK for LE) and optionally disconnect the device.
 * When a disconnect is issued, the reply is deferred to the disconnect
 * completion via the pending command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map the mgmt LE address type to the HCI one */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was found for the address */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect requested: reply is deferred until the HCI
	 * disconnect completes.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2838
/* Handle the MGMT_OP_DISCONNECT command: terminate the BR/EDR or LE
 * connection to the given address. The reply is deferred until the HCI
 * disconnect completes (via the pending command).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2901
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2903 {
2904 switch (link_type) {
2905 case LE_LINK:
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2909
2910 default:
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2913 }
2914
2915 default:
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
2918 }
2919 }
2920
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2922 u16 data_len)
2923 {
2924 struct mgmt_rp_get_connections *rp;
2925 struct hci_conn *c;
2926 size_t rp_len;
2927 int err;
2928 u16 i;
2929
2930 BT_DBG("");
2931
2932 hci_dev_lock(hdev);
2933
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
2937 goto unlock;
2938 }
2939
2940 i = 0;
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2943 i++;
2944 }
2945
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
2948 if (!rp) {
2949 err = -ENOMEM;
2950 goto unlock;
2951 }
2952
2953 i = 0;
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2956 continue;
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2960 continue;
2961 i++;
2962 }
2963
2964 rp->conn_count = cpu_to_le16(i);
2965
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2968
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2970 rp_len);
2971
2972 kfree(rp);
2973
2974 unlock:
2975 hci_dev_unlock(hdev);
2976 return err;
2977 }
2978
/* Queue an HCI PIN Code Negative Reply for the given address and track it
 * as a pending mgmt command; the pending entry is dropped again if the
 * HCI command could not be sent.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2997
/* Handle the MGMT_OP_PIN_CODE_REPLY command: forward the user-supplied
 * PIN code to the controller for the pending BR/EDR pairing. A PIN that
 * is too short for a high-security pairing is converted into a negative
 * reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3059
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3061 u16 len)
3062 {
3063 struct mgmt_cp_set_io_capability *cp = data;
3064
3065 BT_DBG("");
3066
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3070
3071 hci_dev_lock(hdev);
3072
3073 hdev->io_capability = cp->io_capability;
3074
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3077
3078 hci_dev_unlock(hdev);
3079
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3081 0);
3082 }
3083
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3085 {
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3088
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3091 continue;
3092
3093 if (cmd->user_data != conn)
3094 continue;
3095
3096 return cmd;
3097 }
3098
3099 return NULL;
3100 }
3101
/* Finish a Pair Device mgmt command: send the reply, detach the pairing
 * callbacks from the connection and release the references the command
 * held on it.
 */
static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
			   &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Release the use-count taken when pairing started */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3130
3131 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3132 {
3133 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3134 struct pending_cmd *cmd;
3135
3136 cmd = find_pairing(conn);
3137 if (cmd) {
3138 cmd->cmd_complete(cmd, status);
3139 mgmt_pending_remove(cmd);
3140 }
3141 }
3142
3143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3144 {
3145 struct pending_cmd *cmd;
3146
3147 BT_DBG("status %u", status);
3148
3149 cmd = find_pairing(conn);
3150 if (!cmd) {
3151 BT_DBG("Unable to find a pending command");
3152 return;
3153 }
3154
3155 cmd->cmd_complete(cmd, mgmt_status(status));
3156 mgmt_pending_remove(cmd);
3157 }
3158
3159 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3160 {
3161 struct pending_cmd *cmd;
3162
3163 BT_DBG("status %u", status);
3164
3165 if (!status)
3166 return;
3167
3168 cmd = find_pairing(conn);
3169 if (!cmd) {
3170 BT_DBG("Unable to find a pending command");
3171 return;
3172 }
3173
3174 cmd->cmd_complete(cmd, mgmt_status(status));
3175 mgmt_pending_remove(cmd);
3176 }
3177
/* Handle the MGMT_OP_PAIR_DEVICE command: initiate a BR/EDR or LE
 * connection to the target and start pairing on it. The mgmt reply is
 * deferred until one of the pairing callbacks completes the command.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* An existing connect callback means another pairing is already
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a reference for the pending command; released again in
	 * pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3300
/* Handle the MGMT_OP_CANCEL_PAIR_DEVICE command: abort the in-progress
 * Pair Device command for the given address with a Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an active Pair Device command to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device currently being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3343
/* Common handler for the user pairing response commands (PIN negative
 * reply, user confirm/passkey reply and their negatives). LE responses
 * are routed to SMP and answered immediately; BR/EDR responses are sent
 * as the given HCI command and answered on completion.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP rather than the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies only carry the bdaddr */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3413
/* MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3425
/* MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation request. This
 * handler validates the length itself (unlike its siblings) because the
 * command carries no variable payload that a table check would cover.
 * NOTE(review): confirm against the mgmt command table why only this
 * handler re-checks len.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3441
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation request */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3453
/* MGMT_OP_USER_PASSKEY_REPLY: answer a passkey request with the value
 * supplied by the user.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3465
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3477
/* Queue a Write Local Name HCI command carrying the current device name */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3487
/* HCI request completion callback for Set Local Name: finish the pending
 * mgmt command, echoing the requested name on success.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3515
/* Handle the MGMT_OP_SET_LOCAL_NAME command: update the device name and
 * short name, pushing them to the controller (local name + EIR for
 * BR/EDR, scan response data for LE) when powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right away */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and notify listeners directly,
	 * without touching the controller.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Reply will come from set_name_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3584
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local
 * out-of-band pairing data. Requires a powered, SSP-capable controller
 * and only one outstanding request at a time. The reply is delivered
 * asynchronously when the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the extended variant also returns the
	 * P-256 hash/randomizer in addition to the P-192 values.
	 */
	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3632
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received for a remote device. The command exists in two sizes: the
 * legacy form with only the P-192 hash/randomizer, and the extended
 * form which additionally carries the P-256 values. Dispatch is done
 * on the actual parameter length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy OOB data is only defined for BR/EDR addresses. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* NOTE(review): addr.type is guaranteed to be BDADDR_BREDR
		 * at this point, so bdaddr_type_is_le() can never be true
		 * here and the NULL branch looks unreachable — confirm
		 * whether LE address types were meant to be allowed by the
		 * check above.
		 */
		if (bdaddr_type_is_le(cp->addr.type)) {
			rand192 = NULL;
			hash192 = NULL;
		} else {
			rand192 = cp->rand192;
			hash192 = cp->hash192;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      cp->hash256, cp->rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3705
3706 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3707 void *data, u16 len)
3708 {
3709 struct mgmt_cp_remove_remote_oob_data *cp = data;
3710 u8 status;
3711 int err;
3712
3713 BT_DBG("%s", hdev->name);
3714
3715 if (cp->addr.type != BDADDR_BREDR)
3716 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3717 MGMT_STATUS_INVALID_PARAMS,
3718 &cp->addr, sizeof(cp->addr));
3719
3720 hci_dev_lock(hdev);
3721
3722 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3723 hci_remote_oob_data_clear(hdev);
3724 status = MGMT_STATUS_SUCCESS;
3725 goto done;
3726 }
3727
3728 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3729 if (err < 0)
3730 status = MGMT_STATUS_INVALID_PARAMS;
3731 else
3732 status = MGMT_STATUS_SUCCESS;
3733
3734 done:
3735 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3736 status, &cp->addr, sizeof(cp->addr));
3737
3738 hci_dev_unlock(hdev);
3739 return err;
3740 }
3741
/* Append the HCI commands needed to start the currently configured
 * discovery type (hdev->discovery.type) to @req. Returns true when the
 * request was built; returns false and stores a MGMT_STATUS_* code in
 * @status when discovery cannot be started.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* Only one inquiry can be active at a time. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery needs BR/EDR enabled as well. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3838
/* HCI request completion handler for both Start Discovery and Start
 * Service Discovery. Responds to the pending management command,
 * updates the discovery state and, for LE-based discovery, schedules
 * the delayed work that will stop LE scanning after the timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two start-discovery variants may be pending. */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* BR/EDR inquiry stops by itself; only LE scanning needs an
	 * explicit timeout to disable it again.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
3888
/* Handle MGMT_OP_START_DISCOVERY: begin device discovery of the
 * requested type. Completion is reported asynchronously via
 * start_discovery_complete() once the HCI request has run.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already in progress or periodic
	 * inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3954
3955 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3956 {
3957 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
3958 cmd->param, 1);
3959 }
3960
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery but with
 * an RSSI threshold and an optional list of service UUIDs used to
 * filter the reported devices. The variable-length UUID list makes the
 * command size dynamic, hence the explicit length validation.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound that keeps expected_len below from overflowing u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The total length must match the fixed header plus 16 bytes
	 * per 128-bit UUID.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list; freed again by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4068
4069 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4070 {
4071 struct pending_cmd *cmd;
4072
4073 BT_DBG("status %d", status);
4074
4075 hci_dev_lock(hdev);
4076
4077 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4078 if (cmd) {
4079 cmd->cmd_complete(cmd, mgmt_status(status));
4080 mgmt_pending_remove(cmd);
4081 }
4082
4083 if (!status)
4084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4085
4086 hci_dev_unlock(hdev);
4087 }
4088
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery session.
 * The requested type must match the one that is currently running.
 * Completion is normally asynchronous via stop_discovery_complete();
 * if stopping requires no HCI commands it completes immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4146
4147 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4148 u16 len)
4149 {
4150 struct mgmt_cp_confirm_name *cp = data;
4151 struct inquiry_entry *e;
4152 int err;
4153
4154 BT_DBG("%s", hdev->name);
4155
4156 hci_dev_lock(hdev);
4157
4158 if (!hci_discovery_active(hdev)) {
4159 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4160 MGMT_STATUS_FAILED, &cp->addr,
4161 sizeof(cp->addr));
4162 goto failed;
4163 }
4164
4165 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4166 if (!e) {
4167 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4168 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4169 sizeof(cp->addr));
4170 goto failed;
4171 }
4172
4173 if (cp->name_known) {
4174 e->name_state = NAME_KNOWN;
4175 list_del(&e->list);
4176 } else {
4177 e->name_state = NAME_NEEDED;
4178 hci_inquiry_cache_update_resolve(hdev, e);
4179 }
4180
4181 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4182 sizeof(cp->addr));
4183
4184 failed:
4185 hci_dev_unlock(hdev);
4186 return err;
4187 }
4188
4189 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4190 u16 len)
4191 {
4192 struct mgmt_cp_block_device *cp = data;
4193 u8 status;
4194 int err;
4195
4196 BT_DBG("%s", hdev->name);
4197
4198 if (!bdaddr_type_is_valid(cp->addr.type))
4199 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4200 MGMT_STATUS_INVALID_PARAMS,
4201 &cp->addr, sizeof(cp->addr));
4202
4203 hci_dev_lock(hdev);
4204
4205 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4206 cp->addr.type);
4207 if (err < 0) {
4208 status = MGMT_STATUS_FAILED;
4209 goto done;
4210 }
4211
4212 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4213 sk);
4214 status = MGMT_STATUS_SUCCESS;
4215
4216 done:
4217 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4218 &cp->addr, sizeof(cp->addr));
4219
4220 hci_dev_unlock(hdev);
4221
4222 return err;
4223 }
4224
4225 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4226 u16 len)
4227 {
4228 struct mgmt_cp_unblock_device *cp = data;
4229 u8 status;
4230 int err;
4231
4232 BT_DBG("%s", hdev->name);
4233
4234 if (!bdaddr_type_is_valid(cp->addr.type))
4235 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4236 MGMT_STATUS_INVALID_PARAMS,
4237 &cp->addr, sizeof(cp->addr));
4238
4239 hci_dev_lock(hdev);
4240
4241 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4242 cp->addr.type);
4243 if (err < 0) {
4244 status = MGMT_STATUS_INVALID_PARAMS;
4245 goto done;
4246 }
4247
4248 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4249 sk);
4250 status = MGMT_STATUS_SUCCESS;
4251
4252 done:
4253 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4254 &cp->addr, sizeof(cp->addr));
4255
4256 hci_dev_unlock(hdev);
4257
4258 return err;
4259 }
4260
4261 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4262 u16 len)
4263 {
4264 struct mgmt_cp_set_device_id *cp = data;
4265 struct hci_request req;
4266 int err;
4267 __u16 source;
4268
4269 BT_DBG("%s", hdev->name);
4270
4271 source = __le16_to_cpu(cp->source);
4272
4273 if (source > 0x0002)
4274 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4275 MGMT_STATUS_INVALID_PARAMS);
4276
4277 hci_dev_lock(hdev);
4278
4279 hdev->devid_source = source;
4280 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4281 hdev->devid_product = __le16_to_cpu(cp->product);
4282 hdev->devid_version = __le16_to_cpu(cp->version);
4283
4284 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4285
4286 hci_req_init(&req, hdev);
4287 update_eir(&req);
4288 hci_req_run(&req, NULL);
4289
4290 hci_dev_unlock(hdev);
4291
4292 return err;
4293 }
4294
/* HCI request completion handler for Set Advertising. On success the
 * HCI_ADVERTISING setting is synced to the actual controller state
 * (HCI_LE_ADV) before responding to all pending Set Advertising
 * commands and broadcasting New Settings.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4326
/* Handle MGMT_OP_SET_ADVERTISING: enable or disable LE advertising.
 * When the controller is off, the flag is already in the desired state,
 * LE connections exist, or active scanning is running, only the setting
 * flag is toggled; otherwise the change is sent to the controller and
 * completed via set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending Set LE command may also toggle advertising, so
	 * refuse to race with it.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4406
4407 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4408 void *data, u16 len)
4409 {
4410 struct mgmt_cp_set_static_address *cp = data;
4411 int err;
4412
4413 BT_DBG("%s", hdev->name);
4414
4415 if (!lmp_le_capable(hdev))
4416 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4417 MGMT_STATUS_NOT_SUPPORTED);
4418
4419 if (hdev_is_powered(hdev))
4420 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_REJECTED);
4422
4423 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4424 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4425 return cmd_status(sk, hdev->id,
4426 MGMT_OP_SET_STATIC_ADDRESS,
4427 MGMT_STATUS_INVALID_PARAMS);
4428
4429 /* Two most significant bits shall be set */
4430 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4431 return cmd_status(sk, hdev->id,
4432 MGMT_OP_SET_STATIC_ADDRESS,
4433 MGMT_STATUS_INVALID_PARAMS);
4434 }
4435
4436 hci_dev_lock(hdev);
4437
4438 bacpy(&hdev->static_addr, &cp->bdaddr);
4439
4440 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4441
4442 hci_dev_unlock(hdev);
4443
4444 return err;
4445 }
4446
4447 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4448 void *data, u16 len)
4449 {
4450 struct mgmt_cp_set_scan_params *cp = data;
4451 __u16 interval, window;
4452 int err;
4453
4454 BT_DBG("%s", hdev->name);
4455
4456 if (!lmp_le_capable(hdev))
4457 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4458 MGMT_STATUS_NOT_SUPPORTED);
4459
4460 interval = __le16_to_cpu(cp->interval);
4461
4462 if (interval < 0x0004 || interval > 0x4000)
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4464 MGMT_STATUS_INVALID_PARAMS);
4465
4466 window = __le16_to_cpu(cp->window);
4467
4468 if (window < 0x0004 || window > 0x4000)
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4470 MGMT_STATUS_INVALID_PARAMS);
4471
4472 if (window > interval)
4473 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4474 MGMT_STATUS_INVALID_PARAMS);
4475
4476 hci_dev_lock(hdev);
4477
4478 hdev->le_scan_interval = interval;
4479 hdev->le_scan_window = window;
4480
4481 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4482
4483 /* If background scan is running, restart it so new parameters are
4484 * loaded.
4485 */
4486 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4487 hdev->discovery.state == DISCOVERY_STOPPED) {
4488 struct hci_request req;
4489
4490 hci_req_init(&req, hdev);
4491
4492 hci_req_add_le_scan_disable(&req);
4493 hci_req_add_le_passive_scan(&req);
4494
4495 hci_req_run(&req, NULL);
4496 }
4497
4498 hci_dev_unlock(hdev);
4499
4500 return err;
4501 }
4502
/* HCI request completion handler for Set Fast Connectable: sync the
 * HCI_FAST_CONNECTABLE flag with the requested value on success and
 * respond to the pending management command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored in the pending command. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4536
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle faster BR/EDR page scan
 * parameters. Requires BR/EDR enabled, controller version >= 1.2,
 * powered on, and the Connectable setting active. Completion is
 * reported from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just echo the settings back. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4601
/* HCI request completion handler for Set BR/EDR. The HCI_BREDR_ENABLED
 * flag was set optimistically in set_bredr() before the request ran,
 * so it has to be rolled back here if the HCI commands failed.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4633
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling is only possible while powered off;
 * re-enabling is rejected when the configuration would be invalid
 * (static-address-only identity, or LE secure connections active).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Dropping BR/EDR also invalidates all settings that
		 * only make sense with BR/EDR present.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4745
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections support.
 * Value 0x00 disables it, 0x01 enables it, and 0x02 enables SC-only
 * mode. If the controller is off, lacks the BR/EDR SC feature, or has
 * BR/EDR disabled, only the host-side flags are toggled; otherwise the
 * Write SC Support HCI command is sent.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Host-only change: no HCI command needed. */
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state (including SC-only mode):
	 * just echo the settings back.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4829
/* Handle the MGMT Set Debug Keys command.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep and actively use debug keys for pairing.
 *
 * Updates HCI_KEEP_DEBUG_KEYS/HCI_USE_DEBUG_KEYS flags and, when the
 * "use" state changed on a powered adapter with SSP enabled, pushes the
 * new mode to the controller via HCI Write SSP Debug Mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are at least retained */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 turns on active use of the debug key pair */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Best effort: the result of the HCI command is not checked */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4877
/* Handle the MGMT Set Privacy command.
 *
 * Enables or disables LE privacy (resolvable private addresses) and
 * stores the supplied Identity Resolving Key. Rejected while powered,
 * so the change only takes effect on the next power-on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy can only be reconfigured while powered down */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4927
4928 static bool irk_is_valid(struct mgmt_irk_info *irk)
4929 {
4930 switch (irk->addr.type) {
4931 case BDADDR_LE_PUBLIC:
4932 return true;
4933
4934 case BDADDR_LE_RANDOM:
4935 /* Two most significant bits shall be set */
4936 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4937 return false;
4938 return true;
4939 }
4940
4941 return false;
4942 }
4943
/* Handle the MGMT Load IRKs command.
 *
 * Replaces the adapter's complete set of Identity Resolving Keys with
 * the list supplied by user space. The whole list is validated first;
 * any invalid entry rejects the entire command.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can resolve RPAs */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5010
5011 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5012 {
5013 if (key->master != 0x00 && key->master != 0x01)
5014 return false;
5015
5016 switch (key->addr.type) {
5017 case BDADDR_LE_PUBLIC:
5018 return true;
5019
5020 case BDADDR_LE_RANDOM:
5021 /* Two most significant bits shall be set */
5022 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5023 return false;
5024 return true;
5025 }
5026
5027 return false;
5028 }
5029
/* Handle the MGMT Load Long Term Keys command.
 *
 * Replaces the adapter's complete set of SMP Long Term Keys with the
 * list supplied by user space. The whole list is validated first; any
 * invalid entry rejects the entire command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* No break: falls through into default and skips
			 * the entry, so debug keys are never stored.
			 * NOTE(review): confirm this fallthrough is
			 * intentional rather than a missing break.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5121
/* Complete a pending Get Connection Information command.
 *
 * Builds the reply from the cached values in the hci_conn referenced by
 * cmd->user_data (or invalid markers on failure) and releases the
 * connection reference taken when the command was queued.
 */
static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param holds the original request, which begins with the
	 * address block that the reply echoes back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			   &rp, sizeof(rp));

	/* Drop the hold and the reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5148
/* HCI request callback for the Get Connection Information refresh.
 *
 * Recovers the connection handle from the last sent command, finds the
 * matching pending mgmt command and completes it with the translated
 * status.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Pending commands are keyed by the connection they refer to */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5201
/* Handle the MGMT Get Connection Information command.
 *
 * Returns RSSI and TX power for an active connection. Cached values are
 * used while they are still fresh; otherwise an HCI request is issued
 * to refresh them and the reply is deferred to
 * conn_info_refresh_complete() / conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		/* Adding the pending command after hci_req_run() is safe
		 * here because the completion callback also takes
		 * hdev->lock, which we are still holding.
		 */
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the reply is sent */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5320
5321 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5322 {
5323 struct hci_conn *conn = cmd->user_data;
5324 struct mgmt_rp_get_clock_info rp;
5325 struct hci_dev *hdev;
5326 int err;
5327
5328 memset(&rp, 0, sizeof(rp));
5329 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5330
5331 if (status)
5332 goto complete;
5333
5334 hdev = hci_dev_get(cmd->index);
5335 if (hdev) {
5336 rp.local_clock = cpu_to_le32(hdev->clock);
5337 hci_dev_put(hdev);
5338 }
5339
5340 if (conn) {
5341 rp.piconet_clock = cpu_to_le32(conn->clock);
5342 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5343 }
5344
5345 complete:
5346 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5347 sizeof(rp));
5348
5349 if (conn) {
5350 hci_conn_drop(conn);
5351 hci_conn_put(conn);
5352 }
5353
5354 return err;
5355 }
5356
/* HCI request callback for Get Clock Information.
 *
 * Recovers the connection (if any) from the last sent Read Clock
 * command, finds the matching pending mgmt command and completes it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; which == 0 is the local clock only.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Pending commands are keyed by the connection (or NULL) */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5388
/* Handle the MGMT Get Clock Information command.
 *
 * Reads the local clock and, when a specific BR/EDR connection is
 * addressed, its piconet clock as well. The reply is deferred to
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a specific connection; BDADDR_ANY
	 * requests the local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: which = 0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the reply is sent */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		/* Second Read Clock for the piconet clock */
		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5463
5464 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5465 {
5466 struct hci_conn *conn;
5467
5468 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5469 if (!conn)
5470 return false;
5471
5472 if (conn->dst_type != type)
5473 return false;
5474
5475 if (conn->state != BT_CONNECTED)
5476 return false;
5477
5478 return true;
5479 }
5480
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy of the connection
 * parameters for the given address, moving them onto the matching
 * pend_le_conns/pend_le_reports action list and refreshing the
 * background scan via the supplied request.
 *
 * Returns 0 on success, -EIO if the parameter entry could not be
 * allocated.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the params are on now */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if one isn't already up */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5522
5523 static void device_added(struct sock *sk, struct hci_dev *hdev,
5524 bdaddr_t *bdaddr, u8 type, u8 action)
5525 {
5526 struct mgmt_ev_device_added ev;
5527
5528 bacpy(&ev.addr.bdaddr, bdaddr);
5529 ev.addr.type = type;
5530 ev.action = action;
5531
5532 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5533 }
5534
/* HCI request callback for Add Device: complete the pending mgmt
 * command with the translated HCI status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5553
/* Handle the MGMT Add Device command.
 *
 * cp->action: 0x00 = background scan and report (LE only),
 * 0x01 = allow incoming connection, 0x02 = auto-connect (LE only).
 *
 * BR/EDR addresses go onto the whitelist (incoming action only); LE
 * addresses get connection parameters with the requested auto-connect
 * policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changes may require page scan adjustments */
		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5646
5647 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5648 bdaddr_t *bdaddr, u8 type)
5649 {
5650 struct mgmt_ev_device_removed ev;
5651
5652 bacpy(&ev.addr.bdaddr, bdaddr);
5653 ev.addr.type = type;
5654
5655 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5656 }
5657
/* HCI request callback for Remove Device: complete the pending mgmt
 * command with the translated HCI status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5676
/* Handle the MGMT Remove Device command.
 *
 * A specific address removes that device from the whitelist (BR/EDR)
 * or deletes its connection parameters (LE). BDADDR_ANY with type 0
 * removes all whitelist entries and all non-disabled LE connection
 * parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			/* Whitelist shrank; page scan may need updating */
			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * they cannot be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal of all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kept (see above) */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5803
/* Handle the MGMT Load Connection Parameters command.
 *
 * Clears all disabled connection parameter entries and then loads the
 * supplied list. Unlike the key-loading commands, invalid entries are
 * logged and skipped rather than rejecting the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range/consistency check per the LE connection spec */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5888
/* Handle the MGMT Set External Configuration command.
 *
 * Toggles HCI_EXT_CONFIGURED on adapters with the external-config
 * quirk. If the change flips the adapter between configured and
 * unconfigured, the management index is re-registered and, when newly
 * configured, the adapter is powered up through the config state.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration can only change while powered down */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches HCI_UNCONFIGURED,
	 * the adapter must move between the regular and unconfigured
	 * index lists.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Became configured: run the power-on sequence
			 * through the config state.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5946
/* Handle the Set Public Address management command.
 *
 * Stores the requested address in hdev->public_addr; the driver's
 * set_bdaddr callback applies it on the next power on. Only permitted
 * while powered off, with a non-zero address, and when the driver
 * provides set_bdaddr. If the new address makes an unconfigured
 * controller fully configured, the controller is power-cycled so it
 * moves to the configured index list.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for changing the BD_ADDR */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Missing public address is one of the "missing option" bits, so
	 * only unconfigured controllers need a New Options event here.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* Power-cycle through AUTO_OFF so the configured index is
		 * announced from the power on path.
		 */
		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5998
/* Dispatch table for management commands, indexed by opcode (slot 0 is
 * unused). data_len is the exact parameter size for fixed-size commands,
 * or the minimum size when var_len is true; mgmt_control() enforces it
 * before invoking func.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery, true, MGMT_START_SERVICE_DISCOVERY_SIZE },
};
6065
/* Entry point for management commands arriving on an HCI control socket.
 *
 * Validates the message framing, resolves the target controller index,
 * enforces per-state command restrictions (setup/config/user-channel and
 * unconfigured controllers) and dispatches to the matching entry in
 * mgmt_handlers[].
 *
 * Returns the number of consumed bytes on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must match the actual payload size exactly */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a user
		 * channel, are not addressable via management commands.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the commands needed
		 * to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not carry a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... while per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly; variable-length commands
	 * must contain at least their fixed header portion.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6175
6176 void mgmt_index_added(struct hci_dev *hdev)
6177 {
6178 if (hdev->dev_type != HCI_BREDR)
6179 return;
6180
6181 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6182 return;
6183
6184 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6185 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6186 else
6187 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6188 }
6189
6190 void mgmt_index_removed(struct hci_dev *hdev)
6191 {
6192 u8 status = MGMT_STATUS_INVALID_INDEX;
6193
6194 if (hdev->dev_type != HCI_BREDR)
6195 return;
6196
6197 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6198 return;
6199
6200 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6201
6202 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6203 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6204 else
6205 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6206 }
6207
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	/* Rebuild the pending LE connect/report action lists from each
	 * entry's stored auto_connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	/* Re-evaluate passive scanning now that the lists are rebuilt */
	__hci_update_background_scan(req);
}
6235
/* Request-complete callback for the power-on HCI request built by
 * powered_update_hci(). Finishes pending Set Powered commands and emits
 * a New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	/* Complete all pending Set Powered commands; match.sk collects the
	 * socket to skip when broadcasting the settings event.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
6262
/* Build and run the HCI request that synchronizes the controller with
 * the management settings after powering on: SSP/SC host support, LE
 * host support, advertising data, background scan actions, link
 * security, and the BR/EDR page scan/class/name/EIR state.
 *
 * Returns the result of hci_req_run(); 0 means the request was queued
 * and powered_complete() will be called when it finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt has it enabled but the
	 * controller's host feature bit does not reflect that yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6331
/* Notify the management interface of a controller power state change.
 *
 * On power on, try to queue the settings-sync HCI request; when that
 * succeeds the completion path handles the user-space notification. On
 * power off (or when queueing failed), complete pending Set Powered
 * commands, fail all other pending commands with the appropriate status
 * and emit the necessary events directly.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* A return of 0 means the request was queued and
		 * powered_complete() will send the notifications.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Report the class of device as cleared if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6378
6379 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6380 {
6381 struct pending_cmd *cmd;
6382 u8 status;
6383
6384 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6385 if (!cmd)
6386 return;
6387
6388 if (err == -ERFKILL)
6389 status = MGMT_STATUS_RFKILLED;
6390 else
6391 status = MGMT_STATUS_FAILED;
6392
6393 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6394
6395 mgmt_pending_remove(cmd);
6396 }
6397
/* Timer callback for expiry of the discoverable timeout: clears the
 * discoverable flags, restores page-scan-only (for BR/EDR), refreshes
 * class and advertising data, and announces the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan, keep page scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6428
6429 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6430 bool persistent)
6431 {
6432 struct mgmt_ev_new_link_key ev;
6433
6434 memset(&ev, 0, sizeof(ev));
6435
6436 ev.store_hint = persistent;
6437 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6438 ev.key.addr.type = BDADDR_BREDR;
6439 ev.key.type = key->type;
6440 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6441 ev.key.pin_len = key->pin_len;
6442
6443 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6444 }
6445
6446 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6447 {
6448 switch (ltk->type) {
6449 case SMP_LTK:
6450 case SMP_LTK_SLAVE:
6451 if (ltk->authenticated)
6452 return MGMT_LTK_AUTHENTICATED;
6453 return MGMT_LTK_UNAUTHENTICATED;
6454 case SMP_LTK_P256:
6455 if (ltk->authenticated)
6456 return MGMT_LTK_P256_AUTH;
6457 return MGMT_LTK_P256_UNAUTH;
6458 case SMP_LTK_P256_DEBUG:
6459 return MGMT_LTK_P256_DEBUG;
6460 }
6461
6462 return MGMT_LTK_UNAUTHENTICATED;
6463 }
6464
/* Forward a newly distributed LE long term key to user space, with a
 * store hint indicating whether persisting it makes sense.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key type distributed by the master role */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6502
6503 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6504 {
6505 struct mgmt_ev_new_irk ev;
6506
6507 memset(&ev, 0, sizeof(ev));
6508
6509 /* For identity resolving keys from devices that are already
6510 * using a public address or static random address, do not
6511 * ask for storing this key. The identity resolving key really
6512 * is only mandatory for devices using resovlable random
6513 * addresses.
6514 *
6515 * Storing all identity resolving keys has the downside that
6516 * they will be also loaded on next boot of they system. More
6517 * identity resolving keys, means more time during scanning is
6518 * needed to actually resolve these addresses.
6519 */
6520 if (bacmp(&irk->rpa, BDADDR_ANY))
6521 ev.store_hint = 0x01;
6522 else
6523 ev.store_hint = 0x00;
6524
6525 bacpy(&ev.rpa, &irk->rpa);
6526 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6527 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6528 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6529
6530 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6531 }
6532
6533 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6534 bool persistent)
6535 {
6536 struct mgmt_ev_new_csrk ev;
6537
6538 memset(&ev, 0, sizeof(ev));
6539
6540 /* Devices using resolvable or non-resolvable random addresses
6541 * without providing an indentity resolving key don't require
6542 * to store signature resolving keys. Their addresses will change
6543 * the next time around.
6544 *
6545 * Only when a remote device provides an identity address
6546 * make sure the signature resolving key is stored. So allow
6547 * static random and public addresses here.
6548 */
6549 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6550 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6551 ev.store_hint = 0x00;
6552 else
6553 ev.store_hint = persistent;
6554
6555 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6556 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6557 ev.key.master = csrk->master;
6558 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6559
6560 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6561 }
6562
6563 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6564 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6565 u16 max_interval, u16 latency, u16 timeout)
6566 {
6567 struct mgmt_ev_new_conn_param ev;
6568
6569 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6570 return;
6571
6572 memset(&ev, 0, sizeof(ev));
6573 bacpy(&ev.addr.bdaddr, bdaddr);
6574 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6575 ev.store_hint = store_hint;
6576 ev.min_interval = cpu_to_le16(min_interval);
6577 ev.max_interval = cpu_to_le16(max_interval);
6578 ev.latency = cpu_to_le16(latency);
6579 ev.timeout = cpu_to_le16(timeout);
6580
6581 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6582 }
6583
6584 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6585 u8 data_len)
6586 {
6587 eir[eir_len++] = sizeof(type) + data_len;
6588 eir[eir_len++] = type;
6589 memcpy(&eir[eir_len], data, data_len);
6590 eir_len += data_len;
6591
6592 return eir_len;
6593 }
6594
/* Emit a Device Connected event including EIR data for the new
 * connection. NOTE(review): the event is built in a 512-byte stack
 * buffer — assumes le_adv_data / name plus class always fit; confirm
 * against the callers' data limits.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* LE: forward the advertising data as-is */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: synthesize EIR from name and class of device */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6631
6632 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6633 {
6634 struct sock **sk = data;
6635
6636 cmd->cmd_complete(cmd, 0);
6637
6638 *sk = cmd->sk;
6639 sock_hold(*sk);
6640
6641 mgmt_pending_remove(cmd);
6642 }
6643
6644 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6645 {
6646 struct hci_dev *hdev = data;
6647 struct mgmt_cp_unpair_device *cp = cmd->param;
6648
6649 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6650
6651 cmd->cmd_complete(cmd, 0);
6652 mgmt_pending_remove(cmd);
6653 }
6654
6655 bool mgmt_powering_down(struct hci_dev *hdev)
6656 {
6657 struct pending_cmd *cmd;
6658 struct mgmt_mode *cp;
6659
6660 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6661 if (!cmd)
6662 return false;
6663
6664 cp = cmd->param;
6665 if (!cp->val)
6666 return true;
6667
6668 return false;
6669 }
6670
/* Emit a Device Disconnected event and complete any matching pending
 * Disconnect / Unpair Device commands. If the controller is powering
 * down and this was the last connection, expedite the queued power-off.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report links that were announced as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; sk receives the issuing
	 * socket so it is skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6706
6707 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6708 u8 link_type, u8 addr_type, u8 status)
6709 {
6710 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6711 struct mgmt_cp_disconnect *cp;
6712 struct pending_cmd *cmd;
6713
6714 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6715 hdev);
6716
6717 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6718 if (!cmd)
6719 return;
6720
6721 cp = cmd->param;
6722
6723 if (bacmp(bdaddr, &cp->addr.bdaddr))
6724 return;
6725
6726 if (cp->addr.type != bdaddr_type)
6727 return;
6728
6729 cmd->cmd_complete(cmd, mgmt_status(status));
6730 mgmt_pending_remove(cmd);
6731 }
6732
6733 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6734 u8 addr_type, u8 status)
6735 {
6736 struct mgmt_ev_connect_failed ev;
6737
6738 /* The connection is still in hci_conn_hash so test for 1
6739 * instead of 0 to know if this is the last one.
6740 */
6741 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6742 cancel_delayed_work(&hdev->power_off);
6743 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6744 }
6745
6746 bacpy(&ev.addr.bdaddr, bdaddr);
6747 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6748 ev.status = mgmt_status(status);
6749
6750 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6751 }
6752
6753 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6754 {
6755 struct mgmt_ev_pin_code_request ev;
6756
6757 bacpy(&ev.addr.bdaddr, bdaddr);
6758 ev.addr.type = BDADDR_BREDR;
6759 ev.secure = secure;
6760
6761 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6762 }
6763
6764 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6765 u8 status)
6766 {
6767 struct pending_cmd *cmd;
6768
6769 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6770 if (!cmd)
6771 return;
6772
6773 cmd->cmd_complete(cmd, mgmt_status(status));
6774 mgmt_pending_remove(cmd);
6775 }
6776
6777 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6778 u8 status)
6779 {
6780 struct pending_cmd *cmd;
6781
6782 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6783 if (!cmd)
6784 return;
6785
6786 cmd->cmd_complete(cmd, mgmt_status(status));
6787 mgmt_pending_remove(cmd);
6788 }
6789
6790 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6791 u8 link_type, u8 addr_type, u32 value,
6792 u8 confirm_hint)
6793 {
6794 struct mgmt_ev_user_confirm_request ev;
6795
6796 BT_DBG("%s", hdev->name);
6797
6798 bacpy(&ev.addr.bdaddr, bdaddr);
6799 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6800 ev.confirm_hint = confirm_hint;
6801 ev.value = cpu_to_le32(value);
6802
6803 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6804 NULL);
6805 }
6806
6807 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6808 u8 link_type, u8 addr_type)
6809 {
6810 struct mgmt_ev_user_passkey_request ev;
6811
6812 BT_DBG("%s", hdev->name);
6813
6814 bacpy(&ev.addr.bdaddr, bdaddr);
6815 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6816
6817 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6818 NULL);
6819 }
6820
6821 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6822 u8 link_type, u8 addr_type, u8 status,
6823 u8 opcode)
6824 {
6825 struct pending_cmd *cmd;
6826
6827 cmd = mgmt_pending_find(opcode, hdev);
6828 if (!cmd)
6829 return -ENOENT;
6830
6831 cmd->cmd_complete(cmd, mgmt_status(status));
6832 mgmt_pending_remove(cmd);
6833
6834 return 0;
6835 }
6836
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6843
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6851
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6858
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6866
6867 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6868 u8 link_type, u8 addr_type, u32 passkey,
6869 u8 entered)
6870 {
6871 struct mgmt_ev_passkey_notify ev;
6872
6873 BT_DBG("%s", hdev->name);
6874
6875 bacpy(&ev.addr.bdaddr, bdaddr);
6876 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6877 ev.passkey = __cpu_to_le32(passkey);
6878 ev.entered = entered;
6879
6880 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6881 }
6882
/* Report an authentication failure for a connection. If a Pair Device
 * command is pending for this connection its socket is excluded from the
 * event broadcast and the command is completed with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
6903
/* Completion handler for HCI Write Auth Enable issued for Set Link
 * Security. On failure all pending commands get the error status; on
 * success HCI_LINK_SECURITY is synced with the controller's HCI_AUTH
 * state and New Settings is emitted if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6932
6933 static void clear_eir(struct hci_request *req)
6934 {
6935 struct hci_dev *hdev = req->hdev;
6936 struct hci_cp_write_eir cp;
6937
6938 if (!lmp_ext_inq_capable(hdev))
6939 return;
6940
6941 memset(hdev->eir, 0, sizeof(hdev->eir));
6942
6943 memset(&cp, 0, sizeof(cp));
6944
6945 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6946 }
6947
/* Completion handler for HCI Write SSP Mode issued for Set SSP.
 *
 * On failure, the SSP (and dependent HS) flags are rolled back and all
 * pending Set SSP commands receive the error. On success the flags are
 * synced with the requested mode, pending commands are completed, New
 * Settings is emitted on change, and the EIR (plus debug key mode) is
 * updated to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the optimistically set flag; HS depends on SSP
		 * so it has to be cleared together with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7000
/* Completion handler for HCI Write Secure Connections Support issued
 * for Set Secure Connections. On failure the SC flags are rolled back
 * and pending commands receive the error; on success the flags are
 * synced, pending commands completed, and New Settings emitted on
 * change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the optimistically set flags */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot survive disabling SC */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7037
7038 static void sk_lookup(struct pending_cmd *cmd, void *data)
7039 {
7040 struct cmd_lookup *match = data;
7041
7042 if (match->sk == NULL) {
7043 match->sk = cmd->sk;
7044 sock_hold(match->sk);
7045 }
7046 }
7047
/* Completion handler for a class-of-device update.
 *
 * Looks up the socket of whichever pending command triggered the
 * update and, on success, broadcasts the new class to all sockets.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands can cause a class change */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Drop the reference taken by sk_lookup() */
	if (match.sk)
		sock_put(match.sk);
}
7064
/* Completion handler for a local name update.
 *
 * Builds a Local Name Changed event carrying both the full and the
 * short name. When no SET_LOCAL_NAME command is pending the change was
 * kernel-initiated; in that case the stored name is synced, and while
 * a SET_POWERED command is still in progress no event is sent at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace request pending: keep our copy in sync */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
7091
/* Completion handler for HCI Read Local OOB (Extended) Data.
 *
 * Answers the pending MGMT_OP_READ_LOCAL_OOB_DATA command either with
 * an error status, or with the P-192 hash/randomizer pair — extended
 * with the P-256 pair when BR/EDR Secure Connections is enabled and
 * the controller actually provided the 256-bit values.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	/* Nothing to do if userspace never asked for OOB data */
	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			/* Extended reply: both P-192 and P-256 values */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply: P-192 values only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
7134
7135 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7136 {
7137 int i;
7138
7139 for (i = 0; i < uuid_count; i++) {
7140 if (!memcmp(uuid, uuids[i], 16))
7141 return true;
7142 }
7143
7144 return false;
7145 }
7146
/* Scan EIR/advertising data for any UUID from the given filter list.
 *
 * The data is a sequence of length-prefixed fields: eir[0] is the
 * field length (covering the type byte plus payload) and eir[1] the
 * field type. 16- and 32-bit UUIDs (little endian on the wire) are
 * expanded into full 128-bit UUIDs on top of the Bluetooth base UUID
 * before being compared against the filter list.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Payload starts at eir[2]; two bytes per UUID */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Four bytes per UUID, expanded onto the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, 16 bytes each */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (+1 for the length byte itself) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7201
/* Report a discovered remote device to userspace as a Device Found event.
 *
 * Applies the active discovery filters (RSSI threshold and UUID list)
 * before emitting anything: a result is kept when either its EIR/adv
 * data or its scan response matches a filter UUID. The class of device,
 * when known and not already present, is appended as an EIR field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, then all results with a RSSI smaller than the
	 * RSSI threshold will be dropped.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0)
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
		else
			match = true;

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7322
7323 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7324 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7325 {
7326 struct mgmt_ev_device_found *ev;
7327 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7328 u16 eir_len;
7329
7330 ev = (struct mgmt_ev_device_found *) buf;
7331
7332 memset(buf, 0, sizeof(buf));
7333
7334 bacpy(&ev->addr.bdaddr, bdaddr);
7335 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7336 ev->rssi = rssi;
7337
7338 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7339 name_len);
7340
7341 ev->eir_len = cpu_to_le16(eir_len);
7342
7343 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7344 }
7345
7346 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7347 {
7348 struct mgmt_ev_discovering ev;
7349
7350 BT_DBG("%s discovering %u", hdev->name, discovering);
7351
7352 memset(&ev, 0, sizeof(ev));
7353 ev.type = hdev->discovery.type;
7354 ev.discovering = discovering;
7355
7356 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7357 }
7358
/* Request-completion callback for re-enabling advertising. There is
 * nothing to roll back on failure, so the status is only logged.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7363
/* Re-arm LE advertising, but only if the HCI_ADVERTISING setting is
 * still enabled; otherwise leave the controller alone.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
This page took 0.415922 seconds and 6 git commands to generate.