bb02dd1b82bfdf7f739ba2bff165897f9e890ee1
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 8
41
/* Opcodes accepted by this mgmt interface revision; reported verbatim
 * to user space by MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
100
/* Events this interface may emit; reported to user space by
 * MGMT_OP_READ_COMMANDS together with mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
132
133 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
134
135 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00"
137
/* A mgmt command that has been accepted but not yet completed; queued
 * on hdev->mgmt_pending until the matching HCI activity finishes.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* being processed */
	int index;		/* controller id (hdev->id) */
	void *param;		/* copy of the command parameters */
	size_t param_len;
	struct sock *sk;	/* originating socket, holds a reference */
	void *user_data;	/* per-command context (e.g. a connection) */
	/* Called to send the final response; may be NULL. */
	int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
148
149 /* HCI to MGMT error code conversion table */
150 static u8 mgmt_status_table[] = {
151 MGMT_STATUS_SUCCESS,
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
212 };
213
214 static u8 mgmt_status(u8 hci_status)
215 {
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
218
219 return MGMT_STATUS_FAILED;
220 }
221
/* Broadcast a mgmt event on the HCI control channel.
 *
 * @event: MGMT_EV_* opcode
 * @hdev: controller the event refers to, or NULL for MGMT_INDEX_NONE
 * @data/@data_len: optional event payload (data may be NULL)
 * @skip_sk: socket that should NOT receive the event (typically the
 *	     originator of the command), or NULL to send to everyone
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_channel() clones the skb per receiver, so we still
	 * own (and must free) the original.
	 */
	hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
251
/* Queue an MGMT_EV_CMD_STATUS event on @sk to report @status for
 * command @cmd on controller @index. Returns 0 or a negative errno.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On success the receive queue owns the skb; free only on error. */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
281
/* Queue an MGMT_EV_CMD_COMPLETE event on @sk for command @cmd on
 * controller @index, with optional response payload @rp of @rp_len
 * bytes (rp may be NULL when rp_len is 0).
 * Returns 0 or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On success the receive queue owns the skb; free only on error. */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
315
316 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
317 u16 data_len)
318 {
319 struct mgmt_rp_read_version rp;
320
321 BT_DBG("sock %p", sk);
322
323 rp.version = MGMT_VERSION;
324 rp.revision = cpu_to_le16(MGMT_REVISION);
325
326 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
327 sizeof(rp));
328 }
329
/* MGMT_OP_READ_COMMANDS handler: return the supported command and
 * event opcodes as two little-endian u16 arrays appended to the
 * response header.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events, in one contiguous opcode array;
	 * put_unaligned is needed since rp->opcodes has no alignment
	 * guarantee.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
363
/* MGMT_OP_READ_INDEX_LIST handler: return the indexes of all
 * configured BR/EDR controllers.
 *
 * Two passes under hci_dev_list_lock: the first sizes the allocation
 * (an upper bound; it applies fewer filters), the second fills in the
 * indexes and produces the final count, after which rp_len is
 * recomputed to match what was actually written.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are holding hci_dev_list_lock (a rwlock). */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
423
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list(), but
 * returns only BR/EDR controllers that still have HCI_UNCONFIGURED
 * set. Same two-pass size/fill structure under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are holding hci_dev_list_lock (a rwlock). */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
483
484 static bool is_configured(struct hci_dev *hdev)
485 {
486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
487 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
488 return false;
489
490 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
491 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 return false;
493
494 return true;
495 }
496
/* Build the little-endian bitmask of configuration options that are
 * still outstanding for @hdev (mirrors the checks in is_configured()).
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
511
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing
 * options, skipping @skip (usually the socket that triggered the
 * change).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}
519
/* Reply to @opcode on @sk with the current missing-options bitmask as
 * the command-complete payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
527
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
555
/* Compute the MGMT_SETTING_* bitmask of everything this controller is
 * capable of, based on its LMP/LE feature bits and quirks (as opposed
 * to get_current_settings(), which reports what is enabled now).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs 1.2+. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
595
/* Compute the MGMT_SETTING_* bitmask of what is currently enabled on
 * @hdev, derived from its dev_flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never bet set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
663
664 #define PNP_INFO_SVCLASS_ID 0x1200
665
/* Append an EIR field with the controller's 16-bit service UUIDs to
 * @data, writing at most @len bytes. Returns a pointer just past what
 * was written (== @data if nothing fit or nothing qualified).
 *
 * If not all UUIDs fit, the field type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte field header plus one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at bytes 12-13 of the 128-bit form. */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is advertised via EIR_DEVICE_ID instead. */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length, grown per UUID */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
707
/* Append an EIR field with the controller's 32-bit service UUIDs to
 * @data (at most @len bytes); same contract as create_uuid16_list().
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 4-byte UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length, grown per UUID */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives at bytes 12-15 of the 128-bit form. */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
740
/* Append an EIR field with the controller's 128-bit service UUIDs to
 * @data (at most @len bytes); same contract as create_uuid16_list().
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-byte UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length, grown per UUID */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
773
774 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
775 {
776 struct pending_cmd *cmd;
777
778 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
779 if (cmd->opcode == opcode)
780 return cmd;
781 }
782
783 return NULL;
784 }
785
786 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
787 struct hci_dev *hdev,
788 const void *data)
789 {
790 struct pending_cmd *cmd;
791
792 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
793 if (cmd->user_data != data)
794 continue;
795 if (cmd->opcode == opcode)
796 return cmd;
797 }
798
799 return NULL;
800 }
801
/* Write the local-name EIR field into the scan response buffer @ptr
 * and return the number of bytes used (0 if the name is empty). The
 * name is truncated and tagged EIR_NAME_SHORT when it does not fit.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the length/type header. */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR length byte counts the type byte plus the name. */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
827
/* Queue an LE Set Scan Response Data command on @req if LE is enabled
 * and the generated data differs from what the controller already has.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
852
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) for the current - or about-to-be-set - state.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
876
/* Fill the LE advertising data buffer @ptr with the flags field (if
 * any flags apply) and the TX power field (if known). Returns the
 * number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* A zero flags field is simply omitted. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
908
/* Queue an LE Set Advertising Data command on @req if LE is enabled
 * and the generated data differs from what the controller already has.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
933
/* Non-static entry point for other core files: regenerate the LE
 * advertising data and run it as a standalone HCI request.
 * Returns the hci_req_run() result.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
943
/* Build the BR/EDR extended inquiry response in @data: local name,
 * TX power, device ID, then the 16/32/128-bit UUID lists, each
 * bounded by the remaining space in HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* devid_source > 0 means a device ID has been configured. */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
991
/* Queue a Write Extended Inquiry Response command on @req when the
 * controller is powered, supports EIR, has SSP enabled, the service
 * cache is not active, and the generated data actually changed.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
1020
1021 static u8 get_service_classes(struct hci_dev *hdev)
1022 {
1023 struct bt_uuid *uuid;
1024 u8 val = 0;
1025
1026 list_for_each_entry(uuid, &hdev->uuids, list)
1027 val |= uuid->svc_hint;
1028
1029 return val;
1030 }
1031
/* Queue a Write Class of Device command on @req when the controller is
 * powered with BR/EDR enabled, the service cache is inactive, and the
 * computed class differs from the current one.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 13 of the CoD. */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1060
/* Return the effective connectable state, preferring the value of a
 * pending SET_CONNECTABLE command over the current flag.
 */
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}
1076
1077 static void disable_advertising(struct hci_request *req)
1078 {
1079 u8 enable = 0x00;
1080
1081 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1082 }
1083
/* Queue the HCI sequence that (re)starts LE advertising: optionally
 * disable the current instance, update the random address, set the
 * advertising parameters and finally enable advertising. Bails out
 * silently when an LE connection exists or the address update fails.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Parameters can only be changed while advertising is off. */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1124
/* Delayed work: when the service-cache window expires, push the real
 * EIR data and device class to the controller in one request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test_and_clear: only the instance that clears the bit acts. */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1145
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is on, restart it so a fresh RPA gets generated.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1166
/* One-time mgmt initialization for a controller, performed the
 * first time a mgmt socket touches it. The HCI_MGMT flag guards
 * against repeated initialization.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1182
1183 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1184 void *data, u16 data_len)
1185 {
1186 struct mgmt_rp_read_info rp;
1187
1188 BT_DBG("sock %p %s", sk, hdev->name);
1189
1190 hci_dev_lock(hdev);
1191
1192 memset(&rp, 0, sizeof(rp));
1193
1194 bacpy(&rp.bdaddr, &hdev->bdaddr);
1195
1196 rp.version = hdev->hci_ver;
1197 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1198
1199 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1200 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1201
1202 memcpy(rp.dev_class, hdev->dev_class, 3);
1203
1204 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1205 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1206
1207 hci_dev_unlock(hdev);
1208
1209 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1210 sizeof(rp));
1211 }
1212
1213 static void mgmt_pending_free(struct pending_cmd *cmd)
1214 {
1215 sock_put(cmd->sk);
1216 kfree(cmd->param);
1217 kfree(cmd);
1218 }
1219
1220 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1221 struct hci_dev *hdev, void *data,
1222 u16 len)
1223 {
1224 struct pending_cmd *cmd;
1225
1226 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1227 if (!cmd)
1228 return NULL;
1229
1230 cmd->opcode = opcode;
1231 cmd->index = hdev->id;
1232
1233 cmd->param = kmemdup(data, len, GFP_KERNEL);
1234 if (!cmd->param) {
1235 kfree(cmd);
1236 return NULL;
1237 }
1238
1239 cmd->param_len = len;
1240
1241 cmd->sk = sk;
1242 sock_hold(sk);
1243
1244 list_add(&cmd->list, &hdev->mgmt_pending);
1245
1246 return cmd;
1247 }
1248
1249 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1250 void (*cb)(struct pending_cmd *cmd,
1251 void *data),
1252 void *data)
1253 {
1254 struct pending_cmd *cmd, *tmp;
1255
1256 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1257 if (opcode > 0 && cmd->opcode != opcode)
1258 continue;
1259
1260 cb(cmd, data);
1261 }
1262 }
1263
/* Unlink a pending command from its controller's list and release
 * its memory and socket reference.
 */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1269
1270 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1271 {
1272 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1273
1274 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1275 sizeof(settings));
1276 }
1277
1278 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1279 {
1280 BT_DBG("%s status 0x%02x", hdev->name, status);
1281
1282 if (hci_conn_count(hdev) == 0) {
1283 cancel_delayed_work(&hdev->power_off);
1284 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1285 }
1286 }
1287
/* Queue the HCI commands needed to stop whatever discovery activity
 * is currently running (inquiry, LE scan, name resolution or plain
 * passive scanning).
 *
 * Returns true if any stop commands were queued (i.e. discovery was
 * actually in progress), false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is active */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1329
/* Queue all HCI commands needed to bring the controller to a clean
 * state before powering off: disable scanning and advertising, stop
 * discovery and disconnect/cancel/reject every connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed
 * to be done (the caller treats that as immediate power-off).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scanning if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Wind down every connection according to its current state */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection in progress: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting accept: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1388
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 *
 * The transition is asynchronous: a pending command is queued and
 * the response goes out once the power_on/power_off work completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* Device already on due to auto-power-on: claim
			 * it for mgmt and report powered right away.
			 * NOTE(review): mgmt_pending_add() return value
			 * is not checked here — presumably OOM is
			 * tolerated since mgmt_powered() answers via the
			 * pending list; confirm intent.
			 */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already current: just echo the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1454
1455 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1456 {
1457 __le32 ev;
1458
1459 ev = cpu_to_le32(get_current_settings(hdev));
1460
1461 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1462 }
1463
/* Broadcast the current settings to all mgmt sockets (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1468
/* Context passed to pending-command iteration callbacks (e.g.
 * settings_rsp): collects the first responder's socket so it can be
 * skipped when broadcasting, plus the controller and a status code.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder; ref held when set */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1474
1475 static void settings_rsp(struct pending_cmd *cmd, void *data)
1476 {
1477 struct cmd_lookup *match = data;
1478
1479 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1480
1481 list_del(&cmd->list);
1482
1483 if (match->sk == NULL) {
1484 match->sk = cmd->sk;
1485 sock_hold(match->sk);
1486 }
1487
1488 mgmt_pending_free(cmd);
1489 }
1490
1491 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1492 {
1493 u8 *status = data;
1494
1495 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1496 mgmt_pending_remove(cmd);
1497 }
1498
1499 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1500 {
1501 if (cmd->cmd_complete) {
1502 u8 *status = data;
1503
1504 cmd->cmd_complete(cmd, *status);
1505 mgmt_pending_remove(cmd);
1506
1507 return;
1508 }
1509
1510 cmd_status_rsp(cmd, data);
1511 }
1512
/* Default cmd_complete handler: echo the command's own parameters
 * back in the Command Complete event.
 */
static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}
1518
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: only that address portion is echoed back.
 */
static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}
1524
1525 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1526 {
1527 if (!lmp_bredr_capable(hdev))
1528 return MGMT_STATUS_NOT_SUPPORTED;
1529 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1530 return MGMT_STATUS_REJECTED;
1531 else
1532 return MGMT_STATUS_SUCCESS;
1533 }
1534
1535 static u8 mgmt_le_support(struct hci_dev *hdev)
1536 {
1537 if (!lmp_le_capable(hdev))
1538 return MGMT_STATUS_NOT_SUPPORTED;
1539 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1540 return MGMT_STATUS_REJECTED;
1541 else
1542 return MGMT_STATUS_SUCCESS;
1543 }
1544
/* HCI request completion callback for MGMT_OP_SET_DISCOVERABLE:
 * update the flags, arm the discoverable timeout, answer the
 * pending command and re-sync class/page-scan state.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited bit set optimistically in
		 * set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout that switches discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1604
/* Handler for MGMT_OP_SET_DISCOVERABLE: val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). For BR/EDR the IAC list and scan mode are
 * reprogrammed; LE-only controllers just refresh the advertising
 * data flags.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00; /* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33; /* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33; /* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1769
/* Queue page-scan parameter updates that make the controller either
 * fast connectable (interlaced scan, short interval) or restore the
 * standard defaults. Commands are only queued when the requested
 * values actually differ from the currently cached ones.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require at least 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only send what actually changes */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1804
/* HCI request completion callback for MGMT_OP_SET_CONNECTABLE:
 * update the connectable (and, when disabling, discoverable)
 * flags, answer the pending command and re-sync scanning state.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Turning connectable off implicitly drops discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1854
1855 static int set_connectable_update_settings(struct hci_dev *hdev,
1856 struct sock *sk, u8 val)
1857 {
1858 bool changed = false;
1859 int err;
1860
1861 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1862 changed = true;
1863
1864 if (val) {
1865 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1866 } else {
1867 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1868 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1869 }
1870
1871 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1872 if (err < 0)
1873 return err;
1874
1875 if (changed) {
1876 hci_update_page_scan(hdev);
1877 hci_update_background_scan(hdev);
1878 return new_settings(hdev, sk);
1879 }
1880
1881 return 0;
1882 }
1883
/* Handler for MGMT_OP_SET_CONNECTABLE: enable or disable
 * connectability. For BR/EDR this controls page scanning; for LE it
 * influences the advertising type chosen by enable_advertising().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When powered off only the stored settings need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: no HCI commands needed, settings-only change */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1988
1989 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1990 u16 len)
1991 {
1992 struct mgmt_mode *cp = data;
1993 bool changed;
1994 int err;
1995
1996 BT_DBG("request for %s", hdev->name);
1997
1998 if (cp->val != 0x00 && cp->val != 0x01)
1999 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2000 MGMT_STATUS_INVALID_PARAMS);
2001
2002 hci_dev_lock(hdev);
2003
2004 if (cp->val)
2005 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
2006 else
2007 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
2008
2009 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2010 if (err < 0)
2011 goto unlock;
2012
2013 if (changed)
2014 err = new_settings(hdev, sk);
2015
2016 unlock:
2017 hci_dev_unlock(hdev);
2018 return err;
2019 }
2020
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link level
 * security via HCI Write Authentication Enable.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag changes, no HCI needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2090
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple
 * Pairing. Disabling SSP also forces High Speed off, since HS
 * depends on SSP being enabled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags change, no HCI needed */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* HS cannot stay on without SSP; report a change
			 * if either flag flipped.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already current: just echo the settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2171
/* Handler for MGMT_OP_SET_HS: toggle the High Speed setting.
 * Host-side only; requires SSP to be enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the SSP check above */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS while powered is rejected */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2228
/* HCI request completion callback for MGMT_OP_SET_LE: answer all
 * pending SET_LE commands and, if LE ended up enabled, refresh
 * advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* Skip the first responder (already answered above) */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2268
/* Handler for MGMT_OP_SET_LE: toggle Low Energy support via HCI
 * Write LE Host Supported. LE-only controllers cannot toggle it.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed when powered off or when the
	 * controller already reports the requested host LE state.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implies disabling advertising as well */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning LE support off */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2357
2358 /* This is a helper function to test for pending mgmt commands that can
2359 * cause CoD or EIR HCI commands. We can only allow one such pending
2360 * mgmt command at a time since otherwise we cannot easily track what
2361 * the current values are, will be, and based on that calculate if a new
2362 * HCI command needs to be sent and if yes with what value.
2363 */
2364 static bool pending_eir_or_class(struct hci_dev *hdev)
2365 {
2366 struct pending_cmd *cmd;
2367
2368 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2369 switch (cmd->opcode) {
2370 case MGMT_OP_ADD_UUID:
2371 case MGMT_OP_REMOVE_UUID:
2372 case MGMT_OP_SET_DEV_CLASS:
2373 case MGMT_OP_SET_POWERED:
2374 return true;
2375 }
2376 }
2377
2378 return false;
2379 }
2380
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs share these final
 * 12 bytes and carry their value in bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2385
2386 static u8 get_uuid_size(const u8 *uuid)
2387 {
2388 u32 val;
2389
2390 if (memcmp(uuid, bluetooth_base_uuid, 12))
2391 return 128;
2392
2393 val = get_unaligned_le32(&uuid[12]);
2394 if (val > 0xffff)
2395 return 32;
2396
2397 return 16;
2398 }
2399
2400 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2401 {
2402 struct pending_cmd *cmd;
2403
2404 hci_dev_lock(hdev);
2405
2406 cmd = mgmt_pending_find(mgmt_op, hdev);
2407 if (!cmd)
2408 goto unlock;
2409
2410 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2411 hdev->dev_class, 3);
2412
2413 mgmt_pending_remove(cmd);
2414
2415 unlock:
2416 hci_dev_unlock(hdev);
2417 }
2418
/* HCI request completion callback for add_uuid(): finish the pending
 * MGMT_OP_ADD_UUID command via the shared class-complete helper.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2425
/* Handler for MGMT_OP_ADD_UUID: record a new service UUID and refresh
 * the Class of Device and EIR data so remote devices can see it.
 * Returns 0 on success (reply sent asynchronously or immediately) or a
 * negative errno.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	/* Refresh Class of Device and EIR to include the new UUID */
	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA presumably means the request queued no HCI
		 * commands (nothing needed changing), so reply with
		 * immediate success; any other error is returned as-is.
		 * TODO confirm against hci_req_run() semantics.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Reply is deferred until add_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2483
2484 static bool enable_service_cache(struct hci_dev *hdev)
2485 {
2486 if (!hdev_is_powered(hdev))
2487 return false;
2488
2489 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2490 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2491 CACHE_TIMEOUT);
2492 return true;
2493 }
2494
2495 return false;
2496 }
2497
/* HCI request completion callback for remove_uuid(): finish the pending
 * MGMT_OP_REMOVE_UUID command via the shared class-complete helper.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2504
/* Handler for MGMT_OP_REMOVE_UUID: drop one service UUID (or all of
 * them when an all-zero UUID is given) and refresh Class of Device and
 * EIR data accordingly.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the actual CoD/EIR
		 * update is deferred to the cache work; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	/* Remove every stored entry matching the given UUID */
	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA presumably means no HCI commands were queued
		 * (nothing needed changing): treat as immediate success.
		 * TODO confirm against hci_req_run() semantics.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2582
/* HCI request completion callback for set_dev_class(): finish the
 * pending MGMT_OP_SET_DEV_CLASS command via the shared helper.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2589
/* Handler for MGMT_OP_SET_DEV_CLASS: update the major/minor device
 * class and push the resulting Class of Device (and EIR, if the service
 * cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just store the values; they take effect on
	 * power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock around the synchronous cancel, presumably
		 * because the service_cache work itself takes hdev lock —
		 * TODO confirm against the work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA presumably means no HCI commands were queued
		 * (nothing needed changing): treat as immediate success.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2660
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the whole BR/EDR link-key
 * store with the supplied list and update the keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the total message size still
	 * fits in the u16 length; guards the expected_len computation
	 * below against overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Broadcast a settings event only if the policy flag flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2742
2743 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2744 u8 addr_type, struct sock *skip_sk)
2745 {
2746 struct mgmt_ev_device_unpaired ev;
2747
2748 bacpy(&ev.addr.bdaddr, bdaddr);
2749 ev.addr.type = addr_type;
2750
2751 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2752 skip_sk);
2753 }
2754
/* Handler for MGMT_OP_UNPAIR_DEVICE: remove stored keys (link key for
 * BR/EDR; IRK and LTK for LE) and optionally disconnect the device.
 * When a disconnect is needed the reply is deferred until the
 * disconnect completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map mgmt BDADDR_LE_* onto the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was found to remove: the device was not paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2867
/* Handler for MGMT_OP_DISCONNECT: terminate the ACL or LE link to the
 * given address. The reply is deferred until the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2930
2931 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2932 {
2933 switch (link_type) {
2934 case LE_LINK:
2935 switch (addr_type) {
2936 case ADDR_LE_DEV_PUBLIC:
2937 return BDADDR_LE_PUBLIC;
2938
2939 default:
2940 /* Fallback to LE Random address type */
2941 return BDADDR_LE_RANDOM;
2942 }
2943
2944 default:
2945 /* Fallback to BR/EDR type */
2946 return BDADDR_BREDR;
2947 }
2948 }
2949
2950 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2951 u16 data_len)
2952 {
2953 struct mgmt_rp_get_connections *rp;
2954 struct hci_conn *c;
2955 size_t rp_len;
2956 int err;
2957 u16 i;
2958
2959 BT_DBG("");
2960
2961 hci_dev_lock(hdev);
2962
2963 if (!hdev_is_powered(hdev)) {
2964 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2965 MGMT_STATUS_NOT_POWERED);
2966 goto unlock;
2967 }
2968
2969 i = 0;
2970 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2971 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2972 i++;
2973 }
2974
2975 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2976 rp = kmalloc(rp_len, GFP_KERNEL);
2977 if (!rp) {
2978 err = -ENOMEM;
2979 goto unlock;
2980 }
2981
2982 i = 0;
2983 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2984 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2985 continue;
2986 bacpy(&rp->addr[i].bdaddr, &c->dst);
2987 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2988 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2989 continue;
2990 i++;
2991 }
2992
2993 rp->conn_count = cpu_to_le16(i);
2994
2995 /* Recalculate length in case of filtered SCO connections, etc */
2996 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2997
2998 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2999 rp_len);
3000
3001 kfree(rp);
3002
3003 unlock:
3004 hci_dev_unlock(hdev);
3005 return err;
3006 }
3007
/* Queue a HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command. Caller holds the
 * hdev lock (called from pin_code_reply/pin_code_neg_reply paths).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3026
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN to
 * the controller. A high-security pairing demands a full 16-byte PIN;
 * shorter PINs are converted into a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject shorter ones by
	 * sending a negative reply to the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3088
3089 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3090 u16 len)
3091 {
3092 struct mgmt_cp_set_io_capability *cp = data;
3093
3094 BT_DBG("");
3095
3096 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3097 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3098 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3099
3100 hci_dev_lock(hdev);
3101
3102 hdev->io_capability = cp->io_capability;
3103
3104 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3105 hdev->io_capability);
3106
3107 hci_dev_unlock(hdev);
3108
3109 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3110 0);
3111 }
3112
3113 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3114 {
3115 struct hci_dev *hdev = conn->hdev;
3116 struct pending_cmd *cmd;
3117
3118 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3119 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3120 continue;
3121
3122 if (cmd->user_data != conn)
3123 continue;
3124
3125 return cmd;
3126 }
3127
3128 return NULL;
3129 }
3130
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the reply, detach
 * the pairing callbacks from the connection and release the references
 * taken when pairing started.
 */
static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
			   &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3159
3160 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3161 {
3162 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3163 struct pending_cmd *cmd;
3164
3165 cmd = find_pairing(conn);
3166 if (cmd) {
3167 cmd->cmd_complete(cmd, status);
3168 mgmt_pending_remove(cmd);
3169 }
3170 }
3171
3172 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3173 {
3174 struct pending_cmd *cmd;
3175
3176 BT_DBG("status %u", status);
3177
3178 cmd = find_pairing(conn);
3179 if (!cmd) {
3180 BT_DBG("Unable to find a pending command");
3181 return;
3182 }
3183
3184 cmd->cmd_complete(cmd, mgmt_status(status));
3185 mgmt_pending_remove(cmd);
3186 }
3187
3188 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3189 {
3190 struct pending_cmd *cmd;
3191
3192 BT_DBG("status %u", status);
3193
3194 if (!status)
3195 return;
3196
3197 cmd = find_pairing(conn);
3198 if (!cmd) {
3199 BT_DBG("Unable to find a pending command");
3200 return;
3201 }
3202
3203 cmd->cmd_complete(cmd, mgmt_status(status));
3204 mgmt_pending_remove(cmd);
3205 }
3206
/* Handler for MGMT_OP_PAIR_DEVICE: establish an ACL or LE connection to
 * the target and initiate dedicated bonding. The reply is deferred
 * until pairing completes via the connection callbacks.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Pairing always uses medium security with dedicated bonding */
	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate connect errors to mgmt status codes */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A callback already installed means another pairing owns this
	 * connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() via hci_conn_put() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3333
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort a pending pair_device
 * command, provided its target address matches the one supplied.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an ongoing pairing to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* Only the pairing for the given address may be cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pair command as cancelled, then ack the cancel */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3376
/* Common handler for the user confirmation / passkey / PIN reply
 * commands: route LE replies through SMP directly, and queue the
 * corresponding HCI command for BR/EDR (reply deferred until it
 * completes).
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go straight to SMP, no HCI command needed */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3446
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3458
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * request. Rejects any trailing bytes beyond the fixed-size parameters.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3474
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user
 * confirmation request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3486
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by
 * the user.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3498
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey entry
 * request.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3510
/* Queue a HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3520
/* HCI request completion callback for set_local_name(): complete the
 * pending MGMT_OP_SET_LOCAL_NAME command, echoing back the names on
 * success or reporting the translated HCI status on failure.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3548
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name, pushing the new name into EIR (BR/EDR) and the scan response
 * data (LE) when the device is powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, just store the names and notify listeners;
	 * they take effect on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Reply is deferred until set_name_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3617
/* Handle the mgmt Read Local OOB Data command. Requires a powered,
 * SSP-capable controller. Sends the extended HCI read command when
 * BR/EDR Secure Connections is enabled (to also get the P-256 values),
 * otherwise the legacy P-192-only command. The reply is delivered
 * asynchronously via the pending command added here.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3665
/* Handle the mgmt Add Remote OOB Data command. Two command sizes are
 * accepted: the legacy form carrying only P-192 hash/randomizer (valid
 * for BR/EDR addresses only) and the extended form that additionally
 * carries P-256 values. Zero-valued hash/randomizer pairs disable the
 * corresponding OOB data set. Any other length is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS, addr,
				    sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy command: P-192 data only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended command: both P-192 and P-256 data. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_ADD_REMOTE_OOB_DATA,
						   MGMT_STATUS_INVALID_PARAMS,
						   addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3769
/* Handle the mgmt Remove Remote OOB Data command. Only BR/EDR
 * addresses are accepted. Passing BDADDR_ANY clears all stored remote
 * OOB data; otherwise only the entry for the given address is removed.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* A wildcard address means "remove everything". */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	/* A lookup failure means no data was stored for this address. */
	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3805
/* Queue the HCI commands needed to start discovery of the type set in
 * hdev->discovery.type onto @req. On success returns true; on failure
 * returns false and stores the mgmt error code in *status without
 * queuing anything that would need undoing.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* An inquiry in progress means discovery is already active. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		/* Start with a clean cache so stale results don't linger. */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery also needs BR/EDR to be enabled. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3902
/* Completion callback shared by Start Discovery and Start Service
 * Discovery. Answers the pending mgmt command, updates the discovery
 * state and, for LE-based discovery, schedules the delayed work that
 * will stop the LE scan after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may have triggered this. */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* Pure BR/EDR inquiry terminates on its own; no timer. */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
3968
/* Handle the mgmt Start Discovery command. Validates that the
 * controller is powered and no discovery is already running, then
 * builds and runs the discovery HCI request. Completion is reported
 * asynchronously via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse while discovery or periodic inquiry is already active. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4034
4035 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4036 {
4037 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4038 cmd->param, 1);
4039 }
4040
/* Handle the mgmt Start Service Discovery command. Like Start
 * Discovery, but additionally installs an RSSI threshold and an
 * optional list of service UUIDs used to filter the reported results.
 * The variable-length UUID list is validated against the command
 * length before being copied.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Refuse while discovery or periodic inquiry is already active. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* Reject counts that would overflow the expected_len computation. */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4149
/* Completion callback for the HCI request issued by stop_discovery().
 * Answers the pending mgmt command and, if stopping succeeded, marks
 * discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4169
/* Handle the mgmt Stop Discovery command. The requested type must
 * match the currently active discovery type. Stopping normally
 * completes asynchronously via stop_discovery_complete(); if no HCI
 * commands were needed (-ENODATA), the command is completed directly.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The caller must name the same discovery type that is running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4227
/* Handle the mgmt Confirm Name command. Used during discovery to tell
 * the kernel whether the name of a found device is already known. If
 * it is, the device is removed from the name-resolve list; otherwise
 * it is marked as needing name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Only meaningful while discovery is in progress. */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cache entry with unknown name state. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4269
4270 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4271 u16 len)
4272 {
4273 struct mgmt_cp_block_device *cp = data;
4274 u8 status;
4275 int err;
4276
4277 BT_DBG("%s", hdev->name);
4278
4279 if (!bdaddr_type_is_valid(cp->addr.type))
4280 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4281 MGMT_STATUS_INVALID_PARAMS,
4282 &cp->addr, sizeof(cp->addr));
4283
4284 hci_dev_lock(hdev);
4285
4286 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4287 cp->addr.type);
4288 if (err < 0) {
4289 status = MGMT_STATUS_FAILED;
4290 goto done;
4291 }
4292
4293 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4294 sk);
4295 status = MGMT_STATUS_SUCCESS;
4296
4297 done:
4298 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4299 &cp->addr, sizeof(cp->addr));
4300
4301 hci_dev_unlock(hdev);
4302
4303 return err;
4304 }
4305
4306 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4307 u16 len)
4308 {
4309 struct mgmt_cp_unblock_device *cp = data;
4310 u8 status;
4311 int err;
4312
4313 BT_DBG("%s", hdev->name);
4314
4315 if (!bdaddr_type_is_valid(cp->addr.type))
4316 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4317 MGMT_STATUS_INVALID_PARAMS,
4318 &cp->addr, sizeof(cp->addr));
4319
4320 hci_dev_lock(hdev);
4321
4322 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4323 cp->addr.type);
4324 if (err < 0) {
4325 status = MGMT_STATUS_INVALID_PARAMS;
4326 goto done;
4327 }
4328
4329 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4330 sk);
4331 status = MGMT_STATUS_SUCCESS;
4332
4333 done:
4334 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4335 &cp->addr, sizeof(cp->addr));
4336
4337 hci_dev_unlock(hdev);
4338
4339 return err;
4340 }
4341
4342 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4343 u16 len)
4344 {
4345 struct mgmt_cp_set_device_id *cp = data;
4346 struct hci_request req;
4347 int err;
4348 __u16 source;
4349
4350 BT_DBG("%s", hdev->name);
4351
4352 source = __le16_to_cpu(cp->source);
4353
4354 if (source > 0x0002)
4355 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4356 MGMT_STATUS_INVALID_PARAMS);
4357
4358 hci_dev_lock(hdev);
4359
4360 hdev->devid_source = source;
4361 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4362 hdev->devid_product = __le16_to_cpu(cp->product);
4363 hdev->devid_version = __le16_to_cpu(cp->version);
4364
4365 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4366
4367 hci_req_init(&req, hdev);
4368 update_eir(&req);
4369 hci_req_run(&req, NULL);
4370
4371 hci_dev_unlock(hdev);
4372
4373 return err;
4374 }
4375
/* Completion callback for the HCI request issued by set_advertising().
 * Synchronizes the HCI_ADVERTISING setting flag with the actual
 * controller state (HCI_LE_ADV) and answers all pending Set
 * Advertising commands.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state into the
	 * mgmt-visible setting flag.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4407
/* Handle the mgmt Set Advertising command. When no HCI interaction is
 * possible or needed (powered off, value unchanged, LE connections
 * present, or an active LE scan in progress), only the setting flag is
 * toggled and the response is sent directly; otherwise advertising is
 * enabled/disabled on the controller and the command completes via
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another advertising or LE toggle is in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4487
4488 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4489 void *data, u16 len)
4490 {
4491 struct mgmt_cp_set_static_address *cp = data;
4492 int err;
4493
4494 BT_DBG("%s", hdev->name);
4495
4496 if (!lmp_le_capable(hdev))
4497 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4498 MGMT_STATUS_NOT_SUPPORTED);
4499
4500 if (hdev_is_powered(hdev))
4501 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4502 MGMT_STATUS_REJECTED);
4503
4504 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4505 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4506 return cmd_status(sk, hdev->id,
4507 MGMT_OP_SET_STATIC_ADDRESS,
4508 MGMT_STATUS_INVALID_PARAMS);
4509
4510 /* Two most significant bits shall be set */
4511 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4512 return cmd_status(sk, hdev->id,
4513 MGMT_OP_SET_STATIC_ADDRESS,
4514 MGMT_STATUS_INVALID_PARAMS);
4515 }
4516
4517 hci_dev_lock(hdev);
4518
4519 bacpy(&hdev->static_addr, &cp->bdaddr);
4520
4521 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4522 if (err < 0)
4523 goto unlock;
4524
4525 err = new_settings(hdev, sk);
4526
4527 unlock:
4528 hci_dev_unlock(hdev);
4529 return err;
4530 }
4531
4532 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4533 void *data, u16 len)
4534 {
4535 struct mgmt_cp_set_scan_params *cp = data;
4536 __u16 interval, window;
4537 int err;
4538
4539 BT_DBG("%s", hdev->name);
4540
4541 if (!lmp_le_capable(hdev))
4542 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4543 MGMT_STATUS_NOT_SUPPORTED);
4544
4545 interval = __le16_to_cpu(cp->interval);
4546
4547 if (interval < 0x0004 || interval > 0x4000)
4548 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4549 MGMT_STATUS_INVALID_PARAMS);
4550
4551 window = __le16_to_cpu(cp->window);
4552
4553 if (window < 0x0004 || window > 0x4000)
4554 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4555 MGMT_STATUS_INVALID_PARAMS);
4556
4557 if (window > interval)
4558 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4559 MGMT_STATUS_INVALID_PARAMS);
4560
4561 hci_dev_lock(hdev);
4562
4563 hdev->le_scan_interval = interval;
4564 hdev->le_scan_window = window;
4565
4566 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4567
4568 /* If background scan is running, restart it so new parameters are
4569 * loaded.
4570 */
4571 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4572 hdev->discovery.state == DISCOVERY_STOPPED) {
4573 struct hci_request req;
4574
4575 hci_req_init(&req, hdev);
4576
4577 hci_req_add_le_scan_disable(&req);
4578 hci_req_add_le_passive_scan(&req);
4579
4580 hci_req_run(&req, NULL);
4581 }
4582
4583 hci_dev_unlock(hdev);
4584
4585 return err;
4586 }
4587
/* Completion callback for the HCI request issued by
 * set_fast_connectable(). On success, updates the
 * HCI_FAST_CONNECTABLE flag and notifies listeners of the setting
 * change; on failure, reports the error to the command's originator.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4621
/* Handle the mgmt Set Fast Connectable command. Requires BR/EDR to be
 * enabled, controller version 1.2 or later, the controller to be
 * powered, and the connectable setting to be on. Writes the page scan
 * activity/type to the controller and completes asynchronously via
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change required: answer directly without HCI traffic. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4686
/* Completion callback for the HCI request issued by set_bredr(). On
 * failure the HCI_BREDR_ENABLED flag - which set_bredr() flipped
 * optimistically before running the request - is rolled back.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4718
/* Handler for the MGMT_OP_SET_BREDR command: enables or disables the
 * BR/EDR transport on a dual-mode (BR/EDR + LE) controller.
 *
 * Returns a negative errno on failure; otherwise the mgmt response is
 * sent either synchronously or from set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* This command only makes sense on dual-mode controllers. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo back the current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* While powered off only the flags need updating. When
		 * disabling BR/EDR, also clear the settings that apply
		 * exclusively to the BR/EDR transport.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Only one SET_BREDR operation may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4830
4831 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4832 {
4833 struct pending_cmd *cmd;
4834 struct mgmt_mode *cp;
4835
4836 BT_DBG("%s status %u", hdev->name, status);
4837
4838 hci_dev_lock(hdev);
4839
4840 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4841 if (!cmd)
4842 goto unlock;
4843
4844 if (status) {
4845 cmd_status(cmd->sk, cmd->index, cmd->opcode,
4846 mgmt_status(status));
4847 goto remove;
4848 }
4849
4850 cp = cmd->param;
4851
4852 switch (cp->val) {
4853 case 0x00:
4854 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4855 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4856 break;
4857 case 0x01:
4858 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4859 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4860 break;
4861 case 0x02:
4862 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4863 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4864 break;
4865 }
4866
4867 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4868 new_settings(hdev, cmd->sk);
4869
4870 remove:
4871 mgmt_pending_remove(cmd);
4872 unlock:
4873 hci_dev_unlock(hdev);
4874 }
4875
/* Handler for MGMT_OP_SET_SECURE_CONN: 0x00 disables Secure Connections,
 * 0x01 enables it, and 0x02 enables SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC requires controller support (BR/EDR) or LE being enabled
	 * (where it is a host feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable controller with BR/EDR enabled, SSP must be
	 * on before SC can be configured.
	 */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When no HCI command needs to be sent (powered off, not SC
	 * capable, or BR/EDR disabled) just toggle the flags directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both the enabled and the SC-only state
	 * already match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Flags are updated from sc_enable_complete() once the
	 * controller confirms the change.
	 */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4963
4964 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4965 void *data, u16 len)
4966 {
4967 struct mgmt_mode *cp = data;
4968 bool changed, use_changed;
4969 int err;
4970
4971 BT_DBG("request for %s", hdev->name);
4972
4973 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4974 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4975 MGMT_STATUS_INVALID_PARAMS);
4976
4977 hci_dev_lock(hdev);
4978
4979 if (cp->val)
4980 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4981 &hdev->dev_flags);
4982 else
4983 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4984 &hdev->dev_flags);
4985
4986 if (cp->val == 0x02)
4987 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4988 &hdev->dev_flags);
4989 else
4990 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4991 &hdev->dev_flags);
4992
4993 if (hdev_is_powered(hdev) && use_changed &&
4994 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4995 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4996 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4997 sizeof(mode), &mode);
4998 }
4999
5000 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5001 if (err < 0)
5002 goto unlock;
5003
5004 if (changed)
5005 err = new_settings(hdev, sk);
5006
5007 unlock:
5008 hci_dev_unlock(hdev);
5009 return err;
5010 }
5011
5012 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5013 u16 len)
5014 {
5015 struct mgmt_cp_set_privacy *cp = cp_data;
5016 bool changed;
5017 int err;
5018
5019 BT_DBG("request for %s", hdev->name);
5020
5021 if (!lmp_le_capable(hdev))
5022 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5023 MGMT_STATUS_NOT_SUPPORTED);
5024
5025 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5026 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5027 MGMT_STATUS_INVALID_PARAMS);
5028
5029 if (hdev_is_powered(hdev))
5030 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5031 MGMT_STATUS_REJECTED);
5032
5033 hci_dev_lock(hdev);
5034
5035 /* If user space supports this command it is also expected to
5036 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5037 */
5038 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5039
5040 if (cp->privacy) {
5041 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5042 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5043 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5044 } else {
5045 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5046 memset(hdev->irk, 0, sizeof(hdev->irk));
5047 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5048 }
5049
5050 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5051 if (err < 0)
5052 goto unlock;
5053
5054 if (changed)
5055 err = new_settings(hdev, sk);
5056
5057 unlock:
5058 hci_dev_unlock(hdev);
5059 return err;
5060 }
5061
5062 static bool irk_is_valid(struct mgmt_irk_info *irk)
5063 {
5064 switch (irk->addr.type) {
5065 case BDADDR_LE_PUBLIC:
5066 return true;
5067
5068 case BDADDR_LE_RANDOM:
5069 /* Two most significant bits shall be set */
5070 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5071 return false;
5072 return true;
5073 }
5074
5075 return false;
5076 }
5077
/* Handler for MGMT_OP_LOAD_IRKS: replaces the stored Identity Resolving
 * Keys with the list supplied by user space and enables RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total command length within the
	 * 16-bit mgmt length field.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised count exactly. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before making any change. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to handle RPAs. */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5144
5145 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5146 {
5147 if (key->master != 0x00 && key->master != 0x01)
5148 return false;
5149
5150 switch (key->addr.type) {
5151 case BDADDR_LE_PUBLIC:
5152 return true;
5153
5154 case BDADDR_LE_RANDOM:
5155 /* Two most significant bits shall be set */
5156 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5157 return false;
5158 return true;
5159 }
5160
5161 return false;
5162 }
5163
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replaces all stored SMP Long
 * Term Keys with the list provided by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total command length within the
	 * 16-bit mgmt length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised count exactly. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before making any change. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through - there is no break here, so debug
			 * keys drop into the default case and are skipped,
			 * leaving the two assignments above unused.
			 * NOTE(review): looks intentional (debug LTKs are
			 * never stored) - confirm against upstream.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5255
5256 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5257 {
5258 struct hci_conn *conn = cmd->user_data;
5259 struct mgmt_rp_get_conn_info rp;
5260 int err;
5261
5262 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5263
5264 if (status == MGMT_STATUS_SUCCESS) {
5265 rp.rssi = conn->rssi;
5266 rp.tx_power = conn->tx_power;
5267 rp.max_tx_power = conn->max_tx_power;
5268 } else {
5269 rp.rssi = HCI_RSSI_INVALID;
5270 rp.tx_power = HCI_TX_POWER_INVALID;
5271 rp.max_tx_power = HCI_TX_POWER_INVALID;
5272 }
5273
5274 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5275 &rp, sizeof(rp));
5276
5277 hci_conn_drop(conn);
5278 hci_conn_put(conn);
5279
5280 return err;
5281 }
5282
/* Request-complete callback for the Read RSSI / Read TX Power request
 * built in get_conn_info(). Recovers the connection handle from the
 * last sent command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matches the last one sent - nothing to do. */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() builds and sends the mgmt reply. */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5335
/* Handler for MGMT_OP_GET_CONN_INFO: reports RSSI and TX power for an
 * existing connection, refreshing the cached values from the controller
 * when they are stale.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address
	 * type supplied by user space.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one GET_CONN_INFO may be pending per connection. */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the request completes;
		 * the references are dropped in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5454
5455 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5456 {
5457 struct hci_conn *conn = cmd->user_data;
5458 struct mgmt_rp_get_clock_info rp;
5459 struct hci_dev *hdev;
5460 int err;
5461
5462 memset(&rp, 0, sizeof(rp));
5463 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5464
5465 if (status)
5466 goto complete;
5467
5468 hdev = hci_dev_get(cmd->index);
5469 if (hdev) {
5470 rp.local_clock = cpu_to_le32(hdev->clock);
5471 hci_dev_put(hdev);
5472 }
5473
5474 if (conn) {
5475 rp.piconet_clock = cpu_to_le32(conn->clock);
5476 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5477 }
5478
5479 complete:
5480 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5481 sizeof(rp));
5482
5483 if (conn) {
5484 hci_conn_drop(conn);
5485 hci_conn_put(conn);
5486 }
5487
5488 return err;
5489 }
5490
5491 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5492 {
5493 struct hci_cp_read_clock *hci_cp;
5494 struct pending_cmd *cmd;
5495 struct hci_conn *conn;
5496
5497 BT_DBG("%s status %u", hdev->name, status);
5498
5499 hci_dev_lock(hdev);
5500
5501 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5502 if (!hci_cp)
5503 goto unlock;
5504
5505 if (hci_cp->which) {
5506 u16 handle = __le16_to_cpu(hci_cp->handle);
5507 conn = hci_conn_hash_lookup_handle(hdev, handle);
5508 } else {
5509 conn = NULL;
5510 }
5511
5512 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5513 if (!cmd)
5514 goto unlock;
5515
5516 cmd->cmd_complete(cmd, mgmt_status(status));
5517 mgmt_pending_remove(cmd);
5518
5519 unlock:
5520 hci_dev_unlock(hdev);
5521 }
5522
/* Handler for MGMT_OP_GET_CLOCK_INFO: reads the local clock and, when a
 * peer address is given, the piconet clock of that BR/EDR connection.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR addresses. */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects the piconet clock of an existing
	 * connection; BDADDR_ANY means only the local clock is wanted.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0x00 after the memset). */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped in clock_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5597
5598 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5599 {
5600 struct hci_conn *conn;
5601
5602 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5603 if (!conn)
5604 return false;
5605
5606 if (conn->dst_type != type)
5607 return false;
5608
5609 if (conn->state != BT_CONNECTED)
5610 return false;
5611
5612 return true;
5613 }
5614
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged. */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it is currently on
	 * before re-queueing it below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when the device is
		 * not already connected.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5656
5657 static void device_added(struct sock *sk, struct hci_dev *hdev,
5658 bdaddr_t *bdaddr, u8 type, u8 action)
5659 {
5660 struct mgmt_ev_device_added ev;
5661
5662 bacpy(&ev.addr.bdaddr, bdaddr);
5663 ev.addr.type = type;
5664 ev.action = action;
5665
5666 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5667 }
5668
5669 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5670 {
5671 struct pending_cmd *cmd;
5672
5673 BT_DBG("status 0x%02x", status);
5674
5675 hci_dev_lock(hdev);
5676
5677 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5678 if (!cmd)
5679 goto unlock;
5680
5681 cmd->cmd_complete(cmd, mgmt_status(status));
5682 mgmt_pending_remove(cmd);
5683
5684 unlock:
5685 hci_dev_unlock(hdev);
5686 }
5687
/* Handler for MGMT_OP_ADD_DEVICE: whitelists a BR/EDR device for
 * incoming connections (action 0x01), or configures LE auto-connect
 * behaviour (0x00 = report, 0x01 = direct, 0x02 = always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action onto the internal auto-connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5780
5781 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5782 bdaddr_t *bdaddr, u8 type)
5783 {
5784 struct mgmt_ev_device_removed ev;
5785
5786 bacpy(&ev.addr.bdaddr, bdaddr);
5787 ev.addr.type = type;
5788
5789 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5790 }
5791
5792 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5793 {
5794 struct pending_cmd *cmd;
5795
5796 BT_DBG("status 0x%02x", status);
5797
5798 hci_dev_lock(hdev);
5799
5800 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5801 if (!cmd)
5802 goto unlock;
5803
5804 cmd->cmd_complete(cmd, mgmt_status(status));
5805 mgmt_pending_remove(cmd);
5806
5807 unlock:
5808 hci_dev_unlock(hdev);
5809 }
5810
/* Handler for MGMT_OP_REMOVE_DEVICE: removes a single device from the
 * whitelist / LE connection parameters, or - when BDADDR_ANY is given -
 * removes all whitelist entries and all non-disabled LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove one specific device. */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries are not removable via this command. */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything. */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Only address type 0 is valid with BDADDR_ANY. */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kept. */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5937
5938 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5939 u16 len)
5940 {
5941 struct mgmt_cp_load_conn_param *cp = data;
5942 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5943 sizeof(struct mgmt_conn_param));
5944 u16 param_count, expected_len;
5945 int i;
5946
5947 if (!lmp_le_capable(hdev))
5948 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5949 MGMT_STATUS_NOT_SUPPORTED);
5950
5951 param_count = __le16_to_cpu(cp->param_count);
5952 if (param_count > max_param_count) {
5953 BT_ERR("load_conn_param: too big param_count value %u",
5954 param_count);
5955 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5956 MGMT_STATUS_INVALID_PARAMS);
5957 }
5958
5959 expected_len = sizeof(*cp) + param_count *
5960 sizeof(struct mgmt_conn_param);
5961 if (expected_len != len) {
5962 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5963 expected_len, len);
5964 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5965 MGMT_STATUS_INVALID_PARAMS);
5966 }
5967
5968 BT_DBG("%s param_count %u", hdev->name, param_count);
5969
5970 hci_dev_lock(hdev);
5971
5972 hci_conn_params_clear_disabled(hdev);
5973
5974 for (i = 0; i < param_count; i++) {
5975 struct mgmt_conn_param *param = &cp->params[i];
5976 struct hci_conn_params *hci_param;
5977 u16 min, max, latency, timeout;
5978 u8 addr_type;
5979
5980 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5981 param->addr.type);
5982
5983 if (param->addr.type == BDADDR_LE_PUBLIC) {
5984 addr_type = ADDR_LE_DEV_PUBLIC;
5985 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5986 addr_type = ADDR_LE_DEV_RANDOM;
5987 } else {
5988 BT_ERR("Ignoring invalid connection parameters");
5989 continue;
5990 }
5991
5992 min = le16_to_cpu(param->min_interval);
5993 max = le16_to_cpu(param->max_interval);
5994 latency = le16_to_cpu(param->latency);
5995 timeout = le16_to_cpu(param->timeout);
5996
5997 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5998 min, max, latency, timeout);
5999
6000 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6001 BT_ERR("Ignoring invalid connection parameters");
6002 continue;
6003 }
6004
6005 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6006 addr_type);
6007 if (!hci_param) {
6008 BT_ERR("Failed to add connection parameters");
6009 continue;
6010 }
6011
6012 hci_param->conn_min_interval = min;
6013 hci_param->conn_max_interval = max;
6014 hci_param->conn_latency = latency;
6015 hci_param->supervision_timeout = timeout;
6016 }
6017
6018 hci_dev_unlock(hdev);
6019
6020 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
6021 }
6022
6023 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6024 void *data, u16 len)
6025 {
6026 struct mgmt_cp_set_external_config *cp = data;
6027 bool changed;
6028 int err;
6029
6030 BT_DBG("%s", hdev->name);
6031
6032 if (hdev_is_powered(hdev))
6033 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6034 MGMT_STATUS_REJECTED);
6035
6036 if (cp->config != 0x00 && cp->config != 0x01)
6037 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6038 MGMT_STATUS_INVALID_PARAMS);
6039
6040 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6041 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6042 MGMT_STATUS_NOT_SUPPORTED);
6043
6044 hci_dev_lock(hdev);
6045
6046 if (cp->config)
6047 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6048 &hdev->dev_flags);
6049 else
6050 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6051 &hdev->dev_flags);
6052
6053 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6054 if (err < 0)
6055 goto unlock;
6056
6057 if (!changed)
6058 goto unlock;
6059
6060 err = new_options(hdev, sk);
6061
6062 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
6063 mgmt_index_removed(hdev);
6064
6065 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6066 set_bit(HCI_CONFIG, &hdev->dev_flags);
6067 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6068
6069 queue_work(hdev->req_workqueue, &hdev->power_on);
6070 } else {
6071 set_bit(HCI_RAW, &hdev->flags);
6072 mgmt_index_added(hdev);
6073 }
6074 }
6075
6076 unlock:
6077 hci_dev_unlock(hdev);
6078 return err;
6079 }
6080
6081 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6082 void *data, u16 len)
6083 {
6084 struct mgmt_cp_set_public_address *cp = data;
6085 bool changed;
6086 int err;
6087
6088 BT_DBG("%s", hdev->name);
6089
6090 if (hdev_is_powered(hdev))
6091 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6092 MGMT_STATUS_REJECTED);
6093
6094 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6095 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6096 MGMT_STATUS_INVALID_PARAMS);
6097
6098 if (!hdev->set_bdaddr)
6099 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6100 MGMT_STATUS_NOT_SUPPORTED);
6101
6102 hci_dev_lock(hdev);
6103
6104 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6105 bacpy(&hdev->public_addr, &cp->bdaddr);
6106
6107 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6108 if (err < 0)
6109 goto unlock;
6110
6111 if (!changed)
6112 goto unlock;
6113
6114 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6115 err = new_options(hdev, sk);
6116
6117 if (is_configured(hdev)) {
6118 mgmt_index_removed(hdev);
6119
6120 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
6121
6122 set_bit(HCI_CONFIG, &hdev->dev_flags);
6123 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6124
6125 queue_work(hdev->req_workqueue, &hdev->power_on);
6126 }
6127
6128 unlock:
6129 hci_dev_unlock(hdev);
6130 return err;
6131 }
6132
/* Command dispatch table. The array index corresponds directly to the
 * mgmt opcode (0x0001 = MGMT_OP_READ_VERSION and so on), so entries
 * must never be reordered or removed; new commands are appended only.
 * The middle column marks commands with variable-length parameters and
 * the last column gives the (minimum) parameter size enforced by
 * mgmt_control() before dispatch.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery, true, MGMT_START_SERVICE_DISCOVERY_SIZE },
};
6194
/* Entry point for one raw mgmt command received on an HCI control
 * socket. Copies the message, validates the header, resolves the
 * target controller index, enforces per-opcode parameter sizes and
 * dispatches to the matching handler. Returns the number of consumed
 * bytes on success or a negative errno; protocol-level failures are
 * answered with a command status instead of an errno.
 */
int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
		 struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header-declared payload length must match what arrived */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or bound to a user
		 * channel, are not addressable through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the small set of
		 * configuration-related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Fixed-size commands must match exactly; variable-length ones
	 * must at least cover their fixed prefix.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6305
6306 void mgmt_index_added(struct hci_dev *hdev)
6307 {
6308 if (hdev->dev_type != HCI_BREDR)
6309 return;
6310
6311 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6312 return;
6313
6314 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6315 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6316 else
6317 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6318 }
6319
6320 void mgmt_index_removed(struct hci_dev *hdev)
6321 {
6322 u8 status = MGMT_STATUS_INVALID_INDEX;
6323
6324 if (hdev->dev_type != HCI_BREDR)
6325 return;
6326
6327 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6328 return;
6329
6330 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6331
6332 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6333 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6334 else
6335 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6336 }
6337
6338 /* This function requires the caller holds hdev->lock */
6339 static void restart_le_actions(struct hci_request *req)
6340 {
6341 struct hci_dev *hdev = req->hdev;
6342 struct hci_conn_params *p;
6343
6344 list_for_each_entry(p, &hdev->le_conn_params, list) {
6345 /* Needed for AUTO_OFF case where might not "really"
6346 * have been powered off.
6347 */
6348 list_del_init(&p->action);
6349
6350 switch (p->auto_connect) {
6351 case HCI_AUTO_CONN_DIRECT:
6352 case HCI_AUTO_CONN_ALWAYS:
6353 list_add(&p->action, &hdev->pend_le_conns);
6354 break;
6355 case HCI_AUTO_CONN_REPORT:
6356 list_add(&p->action, &hdev->pend_le_reports);
6357 break;
6358 default:
6359 break;
6360 }
6361 }
6362
6363 __hci_update_background_scan(req);
6364 }
6365
6366 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6367 {
6368 struct cmd_lookup match = { NULL, hdev };
6369
6370 BT_DBG("status 0x%02x", status);
6371
6372 if (!status) {
6373 /* Register the available SMP channels (BR/EDR and LE) only
6374 * when successfully powering on the controller. This late
6375 * registration is required so that LE SMP can clearly
6376 * decide if the public address or static address is used.
6377 */
6378 smp_register(hdev);
6379 }
6380
6381 hci_dev_lock(hdev);
6382
6383 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6384
6385 new_settings(hdev, match.sk);
6386
6387 hci_dev_unlock(hdev);
6388
6389 if (match.sk)
6390 sock_put(match.sk);
6391 }
6392
/* Queue the HCI commands needed to bring the controller in line with
 * the current mgmt settings right after powering on. Returns the
 * hci_req_run() result (-ENODATA when no commands were needed).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections, when supported) if mgmt
	 * wants it but the host feature bits say it is off.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		/* Re-arm auto-connect/report actions for known devices */
		restart_le_actions(&req);
	}

	/* Sync link-level authentication with the mgmt setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6461
/* React to a controller power state change. On power-on the HCI state
 * is re-synced via powered_update_hci(); on power-off all pending mgmt
 * commands are failed. Returns the result of new_settings(), or 0 when
 * the controller is not mgmt-managed or completion is deferred to
 * powered_complete().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If HCI commands were queued, powered_complete() will
		 * answer the pending SET_POWERED commands later.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a cleared class of device if one had been set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6508
6509 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6510 {
6511 struct pending_cmd *cmd;
6512 u8 status;
6513
6514 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6515 if (!cmd)
6516 return;
6517
6518 if (err == -ERFKILL)
6519 status = MGMT_STATUS_RFKILLED;
6520 else
6521 status = MGMT_STATUS_FAILED;
6522
6523 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6524
6525 mgmt_pending_remove(cmd);
6526 }
6527
6528 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6529 {
6530 struct hci_request req;
6531
6532 hci_dev_lock(hdev);
6533
6534 /* When discoverable timeout triggers, then just make sure
6535 * the limited discoverable flag is cleared. Even in the case
6536 * of a timeout triggered from general discoverable, it is
6537 * safe to unconditionally clear the flag.
6538 */
6539 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6540 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6541
6542 hci_req_init(&req, hdev);
6543 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6544 u8 scan = SCAN_PAGE;
6545 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6546 sizeof(scan), &scan);
6547 }
6548 update_class(&req);
6549 update_adv_data(&req);
6550 hci_req_run(&req, NULL);
6551
6552 hdev->discov_timeout = 0;
6553
6554 new_settings(hdev, NULL);
6555
6556 hci_dev_unlock(hdev);
6557 }
6558
6559 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6560 bool persistent)
6561 {
6562 struct mgmt_ev_new_link_key ev;
6563
6564 memset(&ev, 0, sizeof(ev));
6565
6566 ev.store_hint = persistent;
6567 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6568 ev.key.addr.type = BDADDR_BREDR;
6569 ev.key.type = key->type;
6570 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6571 ev.key.pin_len = key->pin_len;
6572
6573 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6574 }
6575
6576 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6577 {
6578 switch (ltk->type) {
6579 case SMP_LTK:
6580 case SMP_LTK_SLAVE:
6581 if (ltk->authenticated)
6582 return MGMT_LTK_AUTHENTICATED;
6583 return MGMT_LTK_UNAUTHENTICATED;
6584 case SMP_LTK_P256:
6585 if (ltk->authenticated)
6586 return MGMT_LTK_P256_AUTH;
6587 return MGMT_LTK_P256_UNAUTH;
6588 case SMP_LTK_P256_DEBUG:
6589 return MGMT_LTK_P256_DEBUG;
6590 }
6591
6592 return MGMT_LTK_UNAUTHENTICATED;
6593 }
6594
6595 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6596 {
6597 struct mgmt_ev_new_long_term_key ev;
6598
6599 memset(&ev, 0, sizeof(ev));
6600
6601 /* Devices using resolvable or non-resolvable random addresses
6602 * without providing an indentity resolving key don't require
6603 * to store long term keys. Their addresses will change the
6604 * next time around.
6605 *
6606 * Only when a remote device provides an identity address
6607 * make sure the long term key is stored. If the remote
6608 * identity is known, the long term keys are internally
6609 * mapped to the identity address. So allow static random
6610 * and public addresses here.
6611 */
6612 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6613 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6614 ev.store_hint = 0x00;
6615 else
6616 ev.store_hint = persistent;
6617
6618 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6619 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6620 ev.key.type = mgmt_ltk_type(key);
6621 ev.key.enc_size = key->enc_size;
6622 ev.key.ediv = key->ediv;
6623 ev.key.rand = key->rand;
6624
6625 if (key->type == SMP_LTK)
6626 ev.key.master = 1;
6627
6628 memcpy(ev.key.val, key->val, sizeof(key->val));
6629
6630 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6631 }
6632
6633 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6634 {
6635 struct mgmt_ev_new_irk ev;
6636
6637 memset(&ev, 0, sizeof(ev));
6638
6639 /* For identity resolving keys from devices that are already
6640 * using a public address or static random address, do not
6641 * ask for storing this key. The identity resolving key really
6642 * is only mandatory for devices using resovlable random
6643 * addresses.
6644 *
6645 * Storing all identity resolving keys has the downside that
6646 * they will be also loaded on next boot of they system. More
6647 * identity resolving keys, means more time during scanning is
6648 * needed to actually resolve these addresses.
6649 */
6650 if (bacmp(&irk->rpa, BDADDR_ANY))
6651 ev.store_hint = 0x01;
6652 else
6653 ev.store_hint = 0x00;
6654
6655 bacpy(&ev.rpa, &irk->rpa);
6656 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6657 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6658 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6659
6660 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6661 }
6662
6663 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6664 bool persistent)
6665 {
6666 struct mgmt_ev_new_csrk ev;
6667
6668 memset(&ev, 0, sizeof(ev));
6669
6670 /* Devices using resolvable or non-resolvable random addresses
6671 * without providing an indentity resolving key don't require
6672 * to store signature resolving keys. Their addresses will change
6673 * the next time around.
6674 *
6675 * Only when a remote device provides an identity address
6676 * make sure the signature resolving key is stored. So allow
6677 * static random and public addresses here.
6678 */
6679 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6680 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6681 ev.store_hint = 0x00;
6682 else
6683 ev.store_hint = persistent;
6684
6685 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6686 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6687 ev.key.type = csrk->type;
6688 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6689
6690 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6691 }
6692
6693 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6694 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6695 u16 max_interval, u16 latency, u16 timeout)
6696 {
6697 struct mgmt_ev_new_conn_param ev;
6698
6699 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6700 return;
6701
6702 memset(&ev, 0, sizeof(ev));
6703 bacpy(&ev.addr.bdaddr, bdaddr);
6704 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6705 ev.store_hint = store_hint;
6706 ev.min_interval = cpu_to_le16(min_interval);
6707 ev.max_interval = cpu_to_le16(max_interval);
6708 ev.latency = cpu_to_le16(latency);
6709 ev.timeout = cpu_to_le16(timeout);
6710
6711 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6712 }
6713
6714 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6715 u8 data_len)
6716 {
6717 eir[eir_len++] = sizeof(type) + data_len;
6718 eir[eir_len++] = type;
6719 memcpy(&eir[eir_len], data, data_len);
6720 eir_len += data_len;
6721
6722 return eir_len;
6723 }
6724
/* Emit MGMT_EV_DEVICE_CONNECTED, attaching EIR data describing the
 * peer (LE advertising data, or BR/EDR name plus class of device).
 * NOTE(review): the event and EIR share a fixed 512 byte stack
 * buffer; this assumes le_adv_data_len / name_len are small enough
 * to fit — appears guaranteed by the callers, verify before
 * extending.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: remote name and class of device, when known */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6761
6762 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6763 {
6764 struct sock **sk = data;
6765
6766 cmd->cmd_complete(cmd, 0);
6767
6768 *sk = cmd->sk;
6769 sock_hold(*sk);
6770
6771 mgmt_pending_remove(cmd);
6772 }
6773
6774 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6775 {
6776 struct hci_dev *hdev = data;
6777 struct mgmt_cp_unpair_device *cp = cmd->param;
6778
6779 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6780
6781 cmd->cmd_complete(cmd, 0);
6782 mgmt_pending_remove(cmd);
6783 }
6784
6785 bool mgmt_powering_down(struct hci_dev *hdev)
6786 {
6787 struct pending_cmd *cmd;
6788 struct mgmt_mode *cp;
6789
6790 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6791 if (!cmd)
6792 return false;
6793
6794 cp = cmd->param;
6795 if (!cp->val)
6796 return true;
6797
6798 return false;
6799 }
6800
6801 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6802 u8 link_type, u8 addr_type, u8 reason,
6803 bool mgmt_connected)
6804 {
6805 struct mgmt_ev_device_disconnected ev;
6806 struct sock *sk = NULL;
6807
6808 /* The connection is still in hci_conn_hash so test for 1
6809 * instead of 0 to know if this is the last one.
6810 */
6811 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6812 cancel_delayed_work(&hdev->power_off);
6813 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6814 }
6815
6816 if (!mgmt_connected)
6817 return;
6818
6819 if (link_type != ACL_LINK && link_type != LE_LINK)
6820 return;
6821
6822 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6823
6824 bacpy(&ev.addr.bdaddr, bdaddr);
6825 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6826 ev.reason = reason;
6827
6828 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6829
6830 if (sk)
6831 sock_put(sk);
6832
6833 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6834 hdev);
6835 }
6836
6837 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6838 u8 link_type, u8 addr_type, u8 status)
6839 {
6840 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6841 struct mgmt_cp_disconnect *cp;
6842 struct pending_cmd *cmd;
6843
6844 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6845 hdev);
6846
6847 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6848 if (!cmd)
6849 return;
6850
6851 cp = cmd->param;
6852
6853 if (bacmp(bdaddr, &cp->addr.bdaddr))
6854 return;
6855
6856 if (cp->addr.type != bdaddr_type)
6857 return;
6858
6859 cmd->cmd_complete(cmd, mgmt_status(status));
6860 mgmt_pending_remove(cmd);
6861 }
6862
6863 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6864 u8 addr_type, u8 status)
6865 {
6866 struct mgmt_ev_connect_failed ev;
6867
6868 /* The connection is still in hci_conn_hash so test for 1
6869 * instead of 0 to know if this is the last one.
6870 */
6871 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6872 cancel_delayed_work(&hdev->power_off);
6873 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6874 }
6875
6876 bacpy(&ev.addr.bdaddr, bdaddr);
6877 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6878 ev.status = mgmt_status(status);
6879
6880 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6881 }
6882
6883 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6884 {
6885 struct mgmt_ev_pin_code_request ev;
6886
6887 bacpy(&ev.addr.bdaddr, bdaddr);
6888 ev.addr.type = BDADDR_BREDR;
6889 ev.secure = secure;
6890
6891 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6892 }
6893
6894 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6895 u8 status)
6896 {
6897 struct pending_cmd *cmd;
6898
6899 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6900 if (!cmd)
6901 return;
6902
6903 cmd->cmd_complete(cmd, mgmt_status(status));
6904 mgmt_pending_remove(cmd);
6905 }
6906
6907 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6908 u8 status)
6909 {
6910 struct pending_cmd *cmd;
6911
6912 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6913 if (!cmd)
6914 return;
6915
6916 cmd->cmd_complete(cmd, mgmt_status(status));
6917 mgmt_pending_remove(cmd);
6918 }
6919
6920 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6921 u8 link_type, u8 addr_type, u32 value,
6922 u8 confirm_hint)
6923 {
6924 struct mgmt_ev_user_confirm_request ev;
6925
6926 BT_DBG("%s", hdev->name);
6927
6928 bacpy(&ev.addr.bdaddr, bdaddr);
6929 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6930 ev.confirm_hint = confirm_hint;
6931 ev.value = cpu_to_le32(value);
6932
6933 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6934 NULL);
6935 }
6936
6937 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6938 u8 link_type, u8 addr_type)
6939 {
6940 struct mgmt_ev_user_passkey_request ev;
6941
6942 BT_DBG("%s", hdev->name);
6943
6944 bacpy(&ev.addr.bdaddr, bdaddr);
6945 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6946
6947 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6948 NULL);
6949 }
6950
6951 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6952 u8 link_type, u8 addr_type, u8 status,
6953 u8 opcode)
6954 {
6955 struct pending_cmd *cmd;
6956
6957 cmd = mgmt_pending_find(opcode, hdev);
6958 if (!cmd)
6959 return -ENOENT;
6960
6961 cmd->cmd_complete(cmd, mgmt_status(status));
6962 mgmt_pending_remove(cmd);
6963
6964 return 0;
6965 }
6966
/* Resolve a pending positive User Confirm reply with the HCI status */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6973
/* Resolve a pending negative User Confirm reply with the HCI status */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6981
/* Resolve a pending positive User Passkey reply with the HCI status */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6988
6989 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6990 u8 link_type, u8 addr_type, u8 status)
6991 {
6992 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6993 status,
6994 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6995 }
6996
6997 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6998 u8 link_type, u8 addr_type, u32 passkey,
6999 u8 entered)
7000 {
7001 struct mgmt_ev_passkey_notify ev;
7002
7003 BT_DBG("%s", hdev->name);
7004
7005 bacpy(&ev.addr.bdaddr, bdaddr);
7006 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7007 ev.passkey = __cpu_to_le32(passkey);
7008 ev.entered = entered;
7009
7010 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7011 }
7012
7013 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7014 {
7015 struct mgmt_ev_auth_failed ev;
7016 struct pending_cmd *cmd;
7017 u8 status = mgmt_status(hci_status);
7018
7019 bacpy(&ev.addr.bdaddr, &conn->dst);
7020 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7021 ev.status = status;
7022
7023 cmd = find_pairing(conn);
7024
7025 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7026 cmd ? cmd->sk : NULL);
7027
7028 if (cmd) {
7029 cmd->cmd_complete(cmd, status);
7030 mgmt_pending_remove(cmd);
7031 }
7032 }
7033
7034 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7035 {
7036 struct cmd_lookup match = { NULL, hdev };
7037 bool changed;
7038
7039 if (status) {
7040 u8 mgmt_err = mgmt_status(status);
7041 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7042 cmd_status_rsp, &mgmt_err);
7043 return;
7044 }
7045
7046 if (test_bit(HCI_AUTH, &hdev->flags))
7047 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7048 &hdev->dev_flags);
7049 else
7050 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7051 &hdev->dev_flags);
7052
7053 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7054 &match);
7055
7056 if (changed)
7057 new_settings(hdev, match.sk);
7058
7059 if (match.sk)
7060 sock_put(match.sk);
7061 }
7062
7063 static void clear_eir(struct hci_request *req)
7064 {
7065 struct hci_dev *hdev = req->hdev;
7066 struct hci_cp_write_eir cp;
7067
7068 if (!lmp_ext_inq_capable(hdev))
7069 return;
7070
7071 memset(hdev->eir, 0, sizeof(hdev->eir));
7072
7073 memset(&cp, 0, sizeof(cp));
7074
7075 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7076 }
7077
/* Completion handler for enabling/disabling Secure Simple Pairing.
 * Responds to pending Set SSP commands, keeps the HCI_SSP_ENABLED and
 * HCI_HS_ENABLED flags consistent, emits New Settings when the state
 * changed, and queues the follow-up EIR/debug-key HCI commands.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable: roll back the optimistically-set SSP
		 * flag (HS depends on SSP so it is cleared too) and tell
		 * listeners the settings changed back.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		/* Fail all pending Set SSP commands with the HCI error. */
		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; "changed" must end up
		 * true if either flag actually flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	/* Answer all pending Set SSP commands with the current settings. */
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Sync the controller: with SSP on, optionally enable debug-key
	 * mode and refresh the EIR data; with SSP off, clear the EIR.
	 */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7130
7131 static void sk_lookup(struct pending_cmd *cmd, void *data)
7132 {
7133 struct cmd_lookup *match = data;
7134
7135 if (match->sk == NULL) {
7136 match->sk = cmd->sk;
7137 sock_hold(match->sk);
7138 }
7139 }
7140
7141 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7142 u8 status)
7143 {
7144 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7145
7146 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7147 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7148 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7149
7150 if (!status)
7151 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
7152 NULL);
7153
7154 if (match.sk)
7155 sock_put(match.sk);
7156 }
7157
7158 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7159 {
7160 struct mgmt_cp_set_local_name ev;
7161 struct pending_cmd *cmd;
7162
7163 if (status)
7164 return;
7165
7166 memset(&ev, 0, sizeof(ev));
7167 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7168 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7169
7170 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7171 if (!cmd) {
7172 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7173
7174 /* If this is a HCI command related to powering on the
7175 * HCI dev don't send any mgmt signals.
7176 */
7177 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7178 return;
7179 }
7180
7181 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7182 cmd ? cmd->sk : NULL);
7183 }
7184
7185 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7186 u8 *rand192, u8 *hash256, u8 *rand256,
7187 u8 status)
7188 {
7189 struct pending_cmd *cmd;
7190
7191 BT_DBG("%s status %u", hdev->name, status);
7192
7193 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7194 if (!cmd)
7195 return;
7196
7197 if (status) {
7198 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7199 mgmt_status(status));
7200 } else {
7201 struct mgmt_rp_read_local_oob_data rp;
7202 size_t rp_size = sizeof(rp);
7203
7204 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7205 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7206
7207 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7208 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7209 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7210 } else {
7211 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7212 }
7213
7214 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7215 &rp, rp_size);
7216 }
7217
7218 mgmt_pending_remove(cmd);
7219 }
7220
7221 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7222 {
7223 int i;
7224
7225 for (i = 0; i < uuid_count; i++) {
7226 if (!memcmp(uuid, uuids[i], 16))
7227 return true;
7228 }
7229
7230 return false;
7231 }
7232
/* Scan EIR/advertising data for any service UUID present in the filter
 * list.  16-bit and 32-bit UUIDs are expanded into 128-bit form using
 * the Bluetooth base UUID before comparison; 128-bit UUIDs are
 * compared directly.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field instead of reading past the
		 * end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* EIR field type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs start at eir[2]; map
			 * each into bytes 12-13 of the 128-bit base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs map into bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7287
/* Queue a delayed restart of the current LE scan so that controllers
 * with strict duplicate filtering report results with fresh RSSI
 * values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return;

	/* Skip the restart if the scan would already have run to its
	 * scheduled end (scan_start + scan_duration) by the time the
	 * delayed work fired.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7302
7303 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7304 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7305 {
7306 /* If a RSSI threshold has been specified, and
7307 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7308 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7309 * is set, let it through for further processing, as we might need to
7310 * restart the scan.
7311 *
7312 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7313 * the results are also dropped.
7314 */
7315 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7316 (rssi == HCI_RSSI_INVALID ||
7317 (rssi < hdev->discovery.rssi &&
7318 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7319 return false;
7320
7321 if (hdev->discovery.uuid_count != 0) {
7322 /* If a list of UUIDs is provided in filter, results with no
7323 * matching UUID should be dropped.
7324 */
7325 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7326 hdev->discovery.uuids) &&
7327 !eir_has_uuids(scan_rsp, scan_rsp_len,
7328 hdev->discovery.uuid_count,
7329 hdev->discovery.uuids))
7330 return false;
7331 }
7332
7333 /* If duplicate filtering does not report RSSI changes, then restart
7334 * scanning to ensure updated result with updated RSSI values.
7335 */
7336 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7337 restart_le_scan(hdev);
7338
7339 /* Validate RSSI value against the RSSI threshold once more. */
7340 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7341 rssi < hdev->discovery.rssi)
7342 return false;
7343 }
7344
7345 return true;
7346 }
7347
/* Emit a Device Found event for a discovery or passive-scan result,
 * packing EIR/advertising data, an optional Class of Device field and
 * any scan response data into a single event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only when the EIR data doesn't
	 * already carry one; eir_len grows to cover the appended field.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7416
7417 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7418 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7419 {
7420 struct mgmt_ev_device_found *ev;
7421 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7422 u16 eir_len;
7423
7424 ev = (struct mgmt_ev_device_found *) buf;
7425
7426 memset(buf, 0, sizeof(buf));
7427
7428 bacpy(&ev->addr.bdaddr, bdaddr);
7429 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7430 ev->rssi = rssi;
7431
7432 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7433 name_len);
7434
7435 ev->eir_len = cpu_to_le16(eir_len);
7436
7437 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7438 }
7439
7440 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7441 {
7442 struct mgmt_ev_discovering ev;
7443
7444 BT_DBG("%s discovering %u", hdev->name, discovering);
7445
7446 memset(&ev, 0, sizeof(ev));
7447 ev.type = hdev->discovery.type;
7448 ev.discovering = discovering;
7449
7450 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7451 }
7452
7453 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7454 {
7455 BT_DBG("%s status %u", hdev->name, status);
7456 }
7457
7458 void mgmt_reenable_advertising(struct hci_dev *hdev)
7459 {
7460 struct hci_request req;
7461
7462 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7463 return;
7464
7465 hci_req_init(&req, hdev);
7466 enable_advertising(&req);
7467 hci_req_run(&req, adv_enable_complete);
7468 }
7469
/* Registration descriptor for the HCI control channel, routing raw
 * mgmt commands from control sockets to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
};
7475
7476 int mgmt_init(void)
7477 {
7478 return hci_mgmt_chan_register(&chan);
7479 }
7480
7481 void mgmt_exit(void)
7482 {
7483 hci_mgmt_chan_unregister(&chan);
7484 }
This page took 0.264417 seconds and 4 git commands to generate.