Bluetooth: Add Device Added and Device Removed management events
[deliverable/linux.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management opcodes implemented here; the list is returned verbatim in
 * the Read Management Supported Commands reply (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
};

/* Management events this kernel can emit; also reported by the
 * Read Management Supported Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
};
120
/* How long EIR/class updates stay frozen in the service cache before
 * service_cache_off() flushes them to the controller.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* "Powered" from mgmt's point of view: the interface is up and we are
 * not inside the auto-power-off grace period.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* A management command whose reply is still outstanding. */
struct pending_cmd {
	struct list_head list;	/* linked on hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* this entry tracks */
	int index;		/* controller id the command targets */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* originating socket; a reference is held */
	void *user_data;	/* opaque per-command context */
};
134
/* HCI to MGMT error code conversion table, indexed by the raw HCI status
 * code. Codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
199
200 static u8 mgmt_status(u8 hci_status)
201 {
202 if (hci_status < ARRAY_SIZE(mgmt_status_table))
203 return mgmt_status_table[hci_status];
204
205 return MGMT_STATUS_FAILED;
206 }
207
/* Send a Command Status management event to a single socket.
 *
 * Builds an skb carrying a mgmt_hdr addressed to controller @index
 * followed by a mgmt_ev_cmd_status payload with @cmd and @status, and
 * queues it on @sk's receive queue. Returns 0 on success or a negative
 * errno; the skb is freed if queueing fails.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	/* All wire fields are little endian */
	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
237
/* Send a Command Complete management event to a single socket.
 *
 * Like cmd_status() but additionally carries @rp_len bytes of response
 * parameters after the mgmt_ev_cmd_complete header. @rp may be NULL
 * (with @rp_len typically 0), in which case no payload is copied.
 * Returns 0 on success or a negative errno; the skb is freed if
 * queueing fails.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
271
272 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
273 u16 data_len)
274 {
275 struct mgmt_rp_read_version rp;
276
277 BT_DBG("sock %p", sk);
278
279 rp.version = MGMT_VERSION;
280 rp.revision = cpu_to_le16(MGMT_REVISION);
281
282 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
283 sizeof(rp));
284 }
285
286 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
287 u16 data_len)
288 {
289 struct mgmt_rp_read_commands *rp;
290 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
291 const u16 num_events = ARRAY_SIZE(mgmt_events);
292 __le16 *opcode;
293 size_t rp_size;
294 int i, err;
295
296 BT_DBG("sock %p", sk);
297
298 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
299
300 rp = kmalloc(rp_size, GFP_KERNEL);
301 if (!rp)
302 return -ENOMEM;
303
304 rp->num_commands = cpu_to_le16(num_commands);
305 rp->num_events = cpu_to_le16(num_events);
306
307 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
308 put_unaligned_le16(mgmt_commands[i], opcode);
309
310 for (i = 0; i < num_events; i++, opcode++)
311 put_unaligned_le16(mgmt_events[i], opcode);
312
313 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
314 rp_size);
315 kfree(rp);
316
317 return err;
318 }
319
/* Handle the Read Controller Index List command.
 *
 * Replies with the ids of all BR/EDR controllers that are fully set up
 * and accessible through mgmt. The reply buffer is sized for the worst
 * case (every BR/EDR controller); the fill pass below may select fewer
 * devices, so num_controllers and rp_len are recomputed afterwards.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating under hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: skip controllers still in setup, bound to a user
	 * channel, or marked as raw-only.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
375
376 static u32 get_supported_settings(struct hci_dev *hdev)
377 {
378 u32 settings = 0;
379
380 settings |= MGMT_SETTING_POWERED;
381 settings |= MGMT_SETTING_PAIRABLE;
382 settings |= MGMT_SETTING_DEBUG_KEYS;
383
384 if (lmp_bredr_capable(hdev)) {
385 settings |= MGMT_SETTING_CONNECTABLE;
386 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
391
392 if (lmp_ssp_capable(hdev)) {
393 settings |= MGMT_SETTING_SSP;
394 settings |= MGMT_SETTING_HS;
395 }
396
397 if (lmp_sc_capable(hdev) ||
398 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
399 settings |= MGMT_SETTING_SECURE_CONN;
400 }
401
402 if (lmp_le_capable(hdev)) {
403 settings |= MGMT_SETTING_LE;
404 settings |= MGMT_SETTING_ADVERTISING;
405 settings |= MGMT_SETTING_PRIVACY;
406 }
407
408 return settings;
409 }
410
411 static u32 get_current_settings(struct hci_dev *hdev)
412 {
413 u32 settings = 0;
414
415 if (hdev_is_powered(hdev))
416 settings |= MGMT_SETTING_POWERED;
417
418 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_CONNECTABLE;
420
421 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
422 settings |= MGMT_SETTING_FAST_CONNECTABLE;
423
424 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
425 settings |= MGMT_SETTING_DISCOVERABLE;
426
427 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
428 settings |= MGMT_SETTING_PAIRABLE;
429
430 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_BREDR;
432
433 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_LE;
435
436 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
437 settings |= MGMT_SETTING_LINK_SECURITY;
438
439 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
440 settings |= MGMT_SETTING_SSP;
441
442 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
443 settings |= MGMT_SETTING_HS;
444
445 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
446 settings |= MGMT_SETTING_ADVERTISING;
447
448 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
449 settings |= MGMT_SETTING_SECURE_CONN;
450
451 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
452 settings |= MGMT_SETTING_DEBUG_KEYS;
453
454 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
455 settings |= MGMT_SETTING_PRIVACY;
456
457 return settings;
458 }
459
460 #define PNP_INFO_SVCLASS_ID 0x1200
461
/* Append an EIR block listing registered 16-bit service UUIDs to @data.
 *
 * UUIDs stored with size 16 keep their 16-bit value at offset 12 of the
 * 128-bit representation. Values below 0x1100 and the PnP Information
 * class are skipped. If not every UUID fits within @len the block type
 * is downgraded from "complete" to "some". Returns a pointer just past
 * the written data (unchanged if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily open the block on the first matching UUID; the
		 * length byte starts at 1 to cover the type field.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
503
/* Append an EIR block listing registered 32-bit service UUIDs to @data.
 *
 * The 32-bit value is copied from offset 12 of the stored 128-bit
 * representation. Same block-opening and truncation behavior as
 * create_uuid16_list(); returns a pointer just past the written data.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
536
/* Append an EIR block listing registered 128-bit service UUIDs to @data.
 *
 * Full 16-byte UUIDs are copied verbatim. Same block-opening and
 * truncation behavior as create_uuid16_list(); returns a pointer just
 * past the written data.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
569
/* Find the first pending command with the given opcode on @hdev, or
 * NULL if none is outstanding.
 */
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
581
582 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
583 struct hci_dev *hdev,
584 const void *data)
585 {
586 struct pending_cmd *cmd;
587
588 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
589 if (cmd->user_data != data)
590 continue;
591 if (cmd->opcode == opcode)
592 return cmd;
593 }
594
595 return NULL;
596 }
597
/* Write the local name into @ptr as LE scan response data.
 *
 * The name is emitted as a length/type/value block, shortened (with the
 * EIR_NAME_SHORT type) if it does not fit in the remaining advertising
 * space. Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the length and type fields */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type field plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		/* Advance past the name block (kept for further fields) */
		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
623
/* Queue an HCI LE Set Scan Response Data command on @req if the newly
 * generated data differs from what the controller was last given.
 * No-op when LE is disabled or nothing changed.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if the data is unchanged */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache what we are about to program into the controller */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
648
649 static u8 get_adv_discov_flags(struct hci_dev *hdev)
650 {
651 struct pending_cmd *cmd;
652
653 /* If there's a pending mgmt command the flags will not yet have
654 * their final values, so check for this first.
655 */
656 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
657 if (cmd) {
658 struct mgmt_mode *cp = cmd->param;
659 if (cp->val == 0x01)
660 return LE_AD_GENERAL;
661 else if (cp->val == 0x02)
662 return LE_AD_LIMITED;
663 } else {
664 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
665 return LE_AD_LIMITED;
666 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
667 return LE_AD_GENERAL;
668 }
669
670 return 0;
671 }
672
/* Write the LE advertising data (flags block and, when valid, TX power)
 * into @ptr. Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Advertise BR/EDR-unavailable when BR/EDR is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	/* Only include TX power if the controller reported a valid one */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
704
/* Queue an HCI LE Set Advertising Data command on @req if the newly
 * generated data differs from what the controller was last given.
 * No-op when LE is disabled or nothing changed.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache what we are about to program into the controller */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
729
/* Build the Extended Inquiry Response payload in @data: local name
 * (shortened to 48 bytes if needed), TX power when valid, Device ID
 * record when a source is configured, then the 16/32/128-bit service
 * UUID lists up to HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* Only include TX power if the controller reported a valid one */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list builder gets whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
777
/* Queue an HCI Write Extended Inquiry Response command on @req if the
 * regenerated EIR differs from the cached copy. No-op unless the
 * controller is powered, supports extended inquiry, has SSP enabled,
 * and the service cache is not active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Updates are deferred while the service cache is active */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
806
/* OR together the service class hints of all registered UUIDs; used as
 * the service class byte of the Class of Device in update_class().
 */
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
817
/* Queue an HCI Write Class of Device command on @req if the computed
 * class differs from the current one. No-op unless the controller is
 * powered with BR/EDR enabled and the service cache is not active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Updates are deferred while the service cache is active */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Reflect limited discoverable mode in the class of device */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
846
847 static bool get_connectable(struct hci_dev *hdev)
848 {
849 struct pending_cmd *cmd;
850
851 /* If there's a pending mgmt command the flag will not yet have
852 * it's final value, so check for this first.
853 */
854 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
855 if (cmd) {
856 struct mgmt_mode *cp = cmd->param;
857 return cp->val;
858 }
859
860 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
861 }
862
/* Queue the HCI commands that (re)start LE advertising on @req, using
 * the effective connectable state and a freshly chosen own address.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	/* Parameters must be set before advertising is enabled */
	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
897
/* Queue the HCI command that turns LE advertising off on @req. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
904
/* Delayed work: when the service cache period expires, clear the flag
 * and flush the deferred EIR and Class of Device updates to the
 * controller (both are no-ops in update_eir()/update_class() while the
 * flag is set).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the queued commands outside the device lock */
	hci_req_run(&req, NULL);
}
925
/* Delayed work: the Resolvable Private Address timeout fired. Mark the
 * RPA as expired and, if we are advertising with no LE connections up,
 * cycle advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
951
/* One-time per-controller initialization performed when the first mgmt
 * command targets @hdev; guarded by the HCI_MGMT flag so repeated calls
 * are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
967
/* Handle the Read Controller Information command: reply with the
 * controller's address, version, manufacturer, supported and current
 * settings, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
997
/* Release a pending command: drop the held socket reference and free
 * the parameter copy and the entry itself. The caller must already
 * have unlinked it from the pending list (see mgmt_pending_remove()).
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1004
1005 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1006 struct hci_dev *hdev, void *data,
1007 u16 len)
1008 {
1009 struct pending_cmd *cmd;
1010
1011 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1012 if (!cmd)
1013 return NULL;
1014
1015 cmd->opcode = opcode;
1016 cmd->index = hdev->id;
1017
1018 cmd->param = kmalloc(len, GFP_KERNEL);
1019 if (!cmd->param) {
1020 kfree(cmd);
1021 return NULL;
1022 }
1023
1024 if (data)
1025 memcpy(cmd->param, data, len);
1026
1027 cmd->sk = sk;
1028 sock_hold(sk);
1029
1030 list_add(&cmd->list, &hdev->mgmt_pending);
1031
1032 return cmd;
1033 }
1034
1035 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1036 void (*cb)(struct pending_cmd *cmd,
1037 void *data),
1038 void *data)
1039 {
1040 struct pending_cmd *cmd, *tmp;
1041
1042 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1043 if (opcode > 0 && cmd->opcode != opcode)
1044 continue;
1045
1046 cb(cmd, data);
1047 }
1048 }
1049
/* Unlink a pending command from its list and release it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1055
/* Reply to a settings-changing command with a Command Complete event
 * carrying the current settings bitmask as its parameters.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1063
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, cancel the pending delayed power-off and run it immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1073
/* Queue the HCI commands that abort whatever discovery activity is in
 * progress, depending on the current discovery state.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* BR/EDR inquiry and LE scan are cancelled differently */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1110
/* Queue the HCI commands that tear down all activity before powering
 * off: stop page/inquiry scan, stop advertising, abort discovery, and
 * disconnect, cancel or reject every connection depending on its state.
 * Returns the result of hci_req_run() (-ENODATA when nothing needed
 * doing); clean_up_hci_complete() runs when the request finishes.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		/* Connections in other states are left alone */
		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt: cancel its creation */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming request: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1163
/* Handle the Set Powered command: power the controller up or down.
 *
 * Rejects values other than 0x00/0x01 and refuses to run while another
 * Set Powered is pending. Replies immediately when the requested state
 * already holds; otherwise a pending entry is added and the reply is
 * sent when the power change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* During the auto-off grace period the device is already up, so
	 * powering on just means cancelling the scheduled power-off and
	 * reporting the powered state.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1229
1230 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1231 struct sock *skip_sk)
1232 {
1233 struct sk_buff *skb;
1234 struct mgmt_hdr *hdr;
1235
1236 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1237 if (!skb)
1238 return -ENOMEM;
1239
1240 hdr = (void *) skb_put(skb, sizeof(*hdr));
1241 hdr->opcode = cpu_to_le16(event);
1242 if (hdev)
1243 hdr->index = cpu_to_le16(hdev->id);
1244 else
1245 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1246 hdr->len = cpu_to_le16(data_len);
1247
1248 if (data)
1249 memcpy(skb_put(skb, data_len), data, data_len);
1250
1251 /* Time stamp */
1252 __net_timestamp(skb);
1253
1254 hci_send_to_control(skb, skip_sk);
1255 kfree_skb(skb);
1256
1257 return 0;
1258 }
1259
1260 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1261 {
1262 __le32 ev;
1263
1264 ev = cpu_to_le32(get_current_settings(hdev));
1265
1266 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1267 }
1268
/* State carried through mgmt_pending_foreach() callbacks: remembers
 * one socket (to skip when broadcasting afterwards) and an overall
 * mgmt status code.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1274
/* mgmt_pending_foreach() callback: answer a pending settings command
 * and remember the first socket seen (with a held reference) so the
 * caller can later skip it when sending New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1290
1291 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1292 {
1293 u8 *status = data;
1294
1295 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1296 mgmt_pending_remove(cmd);
1297 }
1298
1299 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1300 {
1301 if (!lmp_bredr_capable(hdev))
1302 return MGMT_STATUS_NOT_SUPPORTED;
1303 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1304 return MGMT_STATUS_REJECTED;
1305 else
1306 return MGMT_STATUS_SUCCESS;
1307 }
1308
1309 static u8 mgmt_le_support(struct hci_dev *hdev)
1310 {
1311 if (!lmp_le_capable(hdev))
1312 return MGMT_STATUS_NOT_SUPPORTED;
1313 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1314 return MGMT_STATUS_REJECTED;
1315 else
1316 return MGMT_STATUS_SUCCESS;
1317 }
1318
/* HCI request callback for Set Discoverable: commit the new mode into
 * dev_flags, arm the discoverable timeout, answer the pending command
 * and emit New Settings when something actually changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited flag was set optimistically before the
		 * request ran; roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout only now that the mode change has
		 * taken effect on the controller.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1375
/* Handle the Set Discoverable management command. Values: 0x00 off,
 * 0x01 general discoverable, 0x02 limited discoverable (which requires
 * a timeout). For powered BR/EDR controllers this programs the IAC LAP
 * and scan enable; LE-only controllers just get new advertising data.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the connectable setting to be on */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* While powered off only the flag is toggled; the controller is
	 * programmed later during power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1540
/* Queue HCI commands adjusting page scan parameters: interlaced scan
 * with a short 160 ms interval when fast connectable is enabled, the
 * standard scan type with the default 1.28 s interval otherwise.
 * Commands are only queued when the current values actually differ.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require Bluetooth 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1575
/* HCI request callback for Set Connectable: commit the flag, answer
 * the pending command and emit New Settings if the state changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1613
1614 static int set_connectable_update_settings(struct hci_dev *hdev,
1615 struct sock *sk, u8 val)
1616 {
1617 bool changed = false;
1618 int err;
1619
1620 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1621 changed = true;
1622
1623 if (val) {
1624 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1625 } else {
1626 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1627 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1628 }
1629
1630 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1631 if (err < 0)
1632 return err;
1633
1634 if (changed)
1635 return new_settings(hdev, sk);
1636
1637 return 0;
1638 }
1639
/* Handle the Set Connectable management command. For powered BR/EDR
 * controllers this toggles page scan; for LE-only controllers it only
 * needs an advertising data update. Advertising is re-programmed when
 * active so the connectable/non-connectable advertising type matches.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the flags, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable also ends any pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so its type reflects the new connectable
	 * state, but only when no LE connection is using it.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA means no HCI commands were queued: fall back
		 * to a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1734
1735 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1736 u16 len)
1737 {
1738 struct mgmt_mode *cp = data;
1739 bool changed;
1740 int err;
1741
1742 BT_DBG("request for %s", hdev->name);
1743
1744 if (cp->val != 0x00 && cp->val != 0x01)
1745 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1746 MGMT_STATUS_INVALID_PARAMS);
1747
1748 hci_dev_lock(hdev);
1749
1750 if (cp->val)
1751 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1752 else
1753 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1754
1755 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1756 if (err < 0)
1757 goto unlock;
1758
1759 if (changed)
1760 err = new_settings(hdev, sk);
1761
1762 unlock:
1763 hci_dev_unlock(hdev);
1764 return err;
1765 }
1766
/* Handle the Set Link Security (authentication) management command.
 * When powered this maps to the HCI Write Auth Enable command; when
 * powered off only the HCI_LINK_SECURITY flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only concept */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1836
/* Handle the Set Secure Simple Pairing management command. Disabling
 * SSP also drops High Speed (HS depends on SSP) and turns off SSP
 * debug mode if it was in use.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only host flags are updated */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS cannot stay enabled without SSP; report a
			 * change if either flag was cleared.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off debug mode (cp->val is 0x00 here,
	 * which is exactly the value the debug mode command needs).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1918
/* Handle the Set High Speed management command. HS is a host-side
 * flag (no HCI traffic), but requires SSP to be enabled, and may only
 * be disabled while the controller is powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1969
/* HCI request callback for Set LE: answer all pending Set LE commands,
 * broadcast New Settings and, if LE ended up enabled, refresh the
 * advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2007
/* Handle the Set Low Energy management command: toggles LE host
 * support via HCI Write LE Host Supported. Disabling LE also stops
 * any active advertising. LE-only controllers reject the command
 * since LE cannot be switched off there.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the controller already has the requested host
	 * LE state: only the flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE host support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2096
2097 /* This is a helper function to test for pending mgmt commands that can
2098 * cause CoD or EIR HCI commands. We can only allow one such pending
2099 * mgmt command at a time since otherwise we cannot easily track what
2100 * the current values are, will be, and based on that calculate if a new
2101 * HCI command needs to be sent and if yes with what value.
2102 */
2103 static bool pending_eir_or_class(struct hci_dev *hdev)
2104 {
2105 struct pending_cmd *cmd;
2106
2107 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2108 switch (cmd->opcode) {
2109 case MGMT_OP_ADD_UUID:
2110 case MGMT_OP_REMOVE_UUID:
2111 case MGMT_OP_SET_DEV_CLASS:
2112 case MGMT_OP_SET_POWERED:
2113 return true;
2114 }
2115 }
2116
2117 return false;
2118 }
2119
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; bytes 12-15 carry the 16/32-bit short form.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2124
2125 static u8 get_uuid_size(const u8 *uuid)
2126 {
2127 u32 val;
2128
2129 if (memcmp(uuid, bluetooth_base_uuid, 12))
2130 return 128;
2131
2132 val = get_unaligned_le32(&uuid[12]);
2133 if (val > 0xffff)
2134 return 32;
2135
2136 return 16;
2137 }
2138
/* Shared completion for class/EIR changing commands (Add UUID, Remove
 * UUID, Set Device Class): answer the pending command with the current
 * class of device as response data.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2157
/* HCI request callback for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2164
/* Handle the Add UUID management command: record the UUID in the
 * device's list and refresh class of device and EIR data.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands were needed (e.g. powered
		 * off), so complete the command immediately.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2222
2223 static bool enable_service_cache(struct hci_dev *hdev)
2224 {
2225 if (!hdev_is_powered(hdev))
2226 return false;
2227
2228 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2229 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2230 CACHE_TIMEOUT);
2231 return true;
2232 }
2233
2234 return false;
2235 }
2236
/* HCI request callback for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2243
/* Handle the Remove UUID management command. An all-zero UUID clears
 * the whole list; otherwise every matching entry is removed. Class of
 * device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero wildcard clears all UUIDs at once */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the HCI update
		 * happens later when the cache is flushed.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands needed, complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2321
/* HCI request callback for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2328
/* Handle the Set Device Class management command: store the new
 * major/minor class and, when powered, program the controller (and
 * refresh EIR if the service cache was active).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while waiting for the work item to
		 * finish; cancel_delayed_work_sync() may sleep.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing to send, complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2399
2400 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2401 u16 len)
2402 {
2403 struct mgmt_cp_load_link_keys *cp = data;
2404 u16 key_count, expected_len;
2405 bool changed;
2406 int i;
2407
2408 BT_DBG("request for %s", hdev->name);
2409
2410 if (!lmp_bredr_capable(hdev))
2411 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2412 MGMT_STATUS_NOT_SUPPORTED);
2413
2414 key_count = __le16_to_cpu(cp->key_count);
2415
2416 expected_len = sizeof(*cp) + key_count *
2417 sizeof(struct mgmt_link_key_info);
2418 if (expected_len != len) {
2419 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2420 expected_len, len);
2421 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2422 MGMT_STATUS_INVALID_PARAMS);
2423 }
2424
2425 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2426 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2427 MGMT_STATUS_INVALID_PARAMS);
2428
2429 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2430 key_count);
2431
2432 for (i = 0; i < key_count; i++) {
2433 struct mgmt_link_key_info *key = &cp->keys[i];
2434
2435 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2436 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2437 MGMT_STATUS_INVALID_PARAMS);
2438 }
2439
2440 hci_dev_lock(hdev);
2441
2442 hci_link_keys_clear(hdev);
2443
2444 if (cp->debug_keys)
2445 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2446 &hdev->dev_flags);
2447 else
2448 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2449 &hdev->dev_flags);
2450
2451 if (changed)
2452 new_settings(hdev, NULL);
2453
2454 for (i = 0; i < key_count; i++) {
2455 struct mgmt_link_key_info *key = &cp->keys[i];
2456
2457 /* Always ignore debug keys and require a new pairing if
2458 * the user wants to use them.
2459 */
2460 if (key->type == HCI_LK_DEBUG_COMBINATION)
2461 continue;
2462
2463 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2464 key->type, key->pin_len, NULL);
2465 }
2466
2467 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2468
2469 hci_dev_unlock(hdev);
2470
2471 return 0;
2472 }
2473
2474 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2475 u8 addr_type, struct sock *skip_sk)
2476 {
2477 struct mgmt_ev_device_unpaired ev;
2478
2479 bacpy(&ev.addr.bdaddr, bdaddr);
2480 ev.addr.type = addr_type;
2481
2482 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2483 skip_sk);
2484 }
2485
/* Unpair Device command handler: removes all stored keys for a remote
 * device and, if requested, disconnects it first.  The reply is deferred
 * when a disconnect has to complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* For LE also drop the IRK and stored connection
		 * parameters; only the LTK removal result decides the
		 * NOT_PAIRED status below.
		 */
		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down: complete immediately and notify
	 * the other mgmt sockets (skipping the requester).
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2576
/* Disconnect command handler: requests termination of an existing
 * BR/EDR or LE connection.  The reply is deferred until the HCI
 * disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	/* Defer the reply until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2641
2642 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2643 {
2644 switch (link_type) {
2645 case LE_LINK:
2646 switch (addr_type) {
2647 case ADDR_LE_DEV_PUBLIC:
2648 return BDADDR_LE_PUBLIC;
2649
2650 default:
2651 /* Fallback to LE Random address type */
2652 return BDADDR_LE_RANDOM;
2653 }
2654
2655 default:
2656 /* Fallback to BR/EDR type */
2657 return BDADDR_BREDR;
2658 }
2659 }
2660
/* Get Connections command handler: returns the address of every
 * connection that mgmt considers established.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidate connections to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses.  SCO/eSCO links are
	 * filtered out after the slot is written — the slot is simply
	 * reused by the next entry since i is not incremented — so the
	 * final count may be smaller than the allocation.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2718
2719 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2720 struct mgmt_cp_pin_code_neg_reply *cp)
2721 {
2722 struct pending_cmd *cmd;
2723 int err;
2724
2725 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2726 sizeof(*cp));
2727 if (!cmd)
2728 return -ENOMEM;
2729
2730 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2731 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2732 if (err < 0)
2733 mgmt_pending_remove(cmd);
2734
2735 return err;
2736 }
2737
/* PIN Code Reply command handler: forwards a user-supplied PIN to the
 * controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only happens on an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Defer the reply until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2797
2798 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2799 u16 len)
2800 {
2801 struct mgmt_cp_set_io_capability *cp = data;
2802
2803 BT_DBG("");
2804
2805 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2806 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2807 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2808
2809 hci_dev_lock(hdev);
2810
2811 hdev->io_capability = cp->io_capability;
2812
2813 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2814 hdev->io_capability);
2815
2816 hci_dev_unlock(hdev);
2817
2818 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2819 0);
2820 }
2821
2822 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2823 {
2824 struct hci_dev *hdev = conn->hdev;
2825 struct pending_cmd *cmd;
2826
2827 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2828 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2829 continue;
2830
2831 if (cmd->user_data != conn)
2832 continue;
2833
2834 return cmd;
2835 }
2836
2837 return NULL;
2838 }
2839
2840 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2841 {
2842 struct mgmt_rp_pair_device rp;
2843 struct hci_conn *conn = cmd->user_data;
2844
2845 bacpy(&rp.addr.bdaddr, &conn->dst);
2846 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2847
2848 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2849 &rp, sizeof(rp));
2850
2851 /* So we don't get further callbacks for this connection */
2852 conn->connect_cfm_cb = NULL;
2853 conn->security_cfm_cb = NULL;
2854 conn->disconn_cfm_cb = NULL;
2855
2856 hci_conn_drop(conn);
2857
2858 mgmt_pending_remove(cmd);
2859 }
2860
2861 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2862 {
2863 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2864 struct pending_cmd *cmd;
2865
2866 cmd = find_pairing(conn);
2867 if (cmd)
2868 pairing_complete(cmd, status);
2869 }
2870
2871 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2872 {
2873 struct pending_cmd *cmd;
2874
2875 BT_DBG("status %u", status);
2876
2877 cmd = find_pairing(conn);
2878 if (!cmd)
2879 BT_DBG("Unable to find a pending command");
2880 else
2881 pairing_complete(cmd, mgmt_status(status));
2882 }
2883
2884 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2885 {
2886 struct pending_cmd *cmd;
2887
2888 BT_DBG("status %u", status);
2889
2890 if (!status)
2891 return;
2892
2893 cmd = find_pairing(conn);
2894 if (!cmd)
2895 BT_DBG("Unable to find a pending command");
2896 else
2897 pairing_complete(cmd, mgmt_status(status));
2898 }
2899
/* Pair Device command handler: initiates a connection (BR/EDR or LE)
 * and pairing with the given remote device.  The reply is deferred and
 * delivered through pairing_complete() when the pairing resolves.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means some other pairing/connect
	 * user already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* If the link is already connected and secure enough the pairing
	 * can complete immediately.
	 */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3006
/* Cancel Pair Device command handler: aborts the in-flight Pair Device
 * command for the given address, completing it with CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must match the requested address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* This completes (and frees) the pending Pair Device command */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3048
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm/passkey replies and their negatives).  LE responses are
 * routed through SMP; BR/EDR responses are sent as HCI commands with a
 * deferred mgmt reply.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and complete synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	/* BR/EDR: defer the mgmt reply until the HCI command completes */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3116
3117 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3118 void *data, u16 len)
3119 {
3120 struct mgmt_cp_pin_code_neg_reply *cp = data;
3121
3122 BT_DBG("");
3123
3124 return user_pairing_resp(sk, hdev, &cp->addr,
3125 MGMT_OP_PIN_CODE_NEG_REPLY,
3126 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3127 }
3128
3129 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3130 u16 len)
3131 {
3132 struct mgmt_cp_user_confirm_reply *cp = data;
3133
3134 BT_DBG("");
3135
3136 if (len != sizeof(*cp))
3137 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3138 MGMT_STATUS_INVALID_PARAMS);
3139
3140 return user_pairing_resp(sk, hdev, &cp->addr,
3141 MGMT_OP_USER_CONFIRM_REPLY,
3142 HCI_OP_USER_CONFIRM_REPLY, 0);
3143 }
3144
3145 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3146 void *data, u16 len)
3147 {
3148 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3149
3150 BT_DBG("");
3151
3152 return user_pairing_resp(sk, hdev, &cp->addr,
3153 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3154 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3155 }
3156
3157 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3158 u16 len)
3159 {
3160 struct mgmt_cp_user_passkey_reply *cp = data;
3161
3162 BT_DBG("");
3163
3164 return user_pairing_resp(sk, hdev, &cp->addr,
3165 MGMT_OP_USER_PASSKEY_REPLY,
3166 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3167 }
3168
3169 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 void *data, u16 len)
3171 {
3172 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3173
3174 BT_DBG("");
3175
3176 return user_pairing_resp(sk, hdev, &cp->addr,
3177 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3178 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3179 }
3180
3181 static void update_name(struct hci_request *req)
3182 {
3183 struct hci_dev *hdev = req->hdev;
3184 struct hci_cp_write_local_name cp;
3185
3186 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3187
3188 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3189 }
3190
3191 static void set_name_complete(struct hci_dev *hdev, u8 status)
3192 {
3193 struct mgmt_cp_set_local_name *cp;
3194 struct pending_cmd *cmd;
3195
3196 BT_DBG("status 0x%02x", status);
3197
3198 hci_dev_lock(hdev);
3199
3200 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3201 if (!cmd)
3202 goto unlock;
3203
3204 cp = cmd->param;
3205
3206 if (status)
3207 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3208 mgmt_status(status));
3209 else
3210 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3211 cp, sizeof(*cp));
3212
3213 mgmt_pending_remove(cmd);
3214
3215 unlock:
3216 hci_dev_unlock(hdev);
3217 }
3218
/* Set Local Name command handler: updates the device name and short
 * name.  When powered, the name is pushed to the controller (and into
 * EIR / LE scan response data); when powered off only the stored values
 * change and listeners are notified directly.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets (skipping the requester) */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Powered: defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3287
/* Read Local OOB Data command handler: asks the controller for its
 * out-of-band pairing data (extended variant when Secure Connections
 * is enabled).  The reply is deferred to the HCI command completion.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections enabled use the extended command which
	 * also returns the P-256 hash and randomizer.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3335
3336 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3337 void *data, u16 len)
3338 {
3339 int err;
3340
3341 BT_DBG("%s ", hdev->name);
3342
3343 hci_dev_lock(hdev);
3344
3345 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3346 struct mgmt_cp_add_remote_oob_data *cp = data;
3347 u8 status;
3348
3349 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3350 cp->hash, cp->randomizer);
3351 if (err < 0)
3352 status = MGMT_STATUS_FAILED;
3353 else
3354 status = MGMT_STATUS_SUCCESS;
3355
3356 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3357 status, &cp->addr, sizeof(cp->addr));
3358 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3359 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3360 u8 status;
3361
3362 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3363 cp->hash192,
3364 cp->randomizer192,
3365 cp->hash256,
3366 cp->randomizer256);
3367 if (err < 0)
3368 status = MGMT_STATUS_FAILED;
3369 else
3370 status = MGMT_STATUS_SUCCESS;
3371
3372 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3373 status, &cp->addr, sizeof(cp->addr));
3374 } else {
3375 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3376 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3377 MGMT_STATUS_INVALID_PARAMS);
3378 }
3379
3380 hci_dev_unlock(hdev);
3381 return err;
3382 }
3383
3384 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3385 void *data, u16 len)
3386 {
3387 struct mgmt_cp_remove_remote_oob_data *cp = data;
3388 u8 status;
3389 int err;
3390
3391 BT_DBG("%s", hdev->name);
3392
3393 hci_dev_lock(hdev);
3394
3395 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3396 if (err < 0)
3397 status = MGMT_STATUS_INVALID_PARAMS;
3398 else
3399 status = MGMT_STATUS_SUCCESS;
3400
3401 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3402 status, &cp->addr, sizeof(cp->addr));
3403
3404 hci_dev_unlock(hdev);
3405 return err;
3406 }
3407
3408 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3409 {
3410 struct pending_cmd *cmd;
3411 u8 type;
3412 int err;
3413
3414 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3415
3416 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3417 if (!cmd)
3418 return -ENOENT;
3419
3420 type = hdev->discovery.type;
3421
3422 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3423 &type, sizeof(type));
3424 mgmt_pending_remove(cmd);
3425
3426 return err;
3427 }
3428
/* HCI request completion for Start Discovery: move the discovery state
 * machine to FINDING and, for LE-based discovery, arm the timer that
 * disables LE scanning after the discovery interval.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry ends on its own; no LE scan to disable */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3467
/* Start Discovery command handler: kicks off BR/EDR inquiry, LE active
 * scanning, or both (interleaved), depending on the requested type.
 * The reply is deferred until the HCI request completes.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry conflicts with on-demand discovery */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry is already running outside of discovery */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start with a clean cache so stale results don't leak
		 * into the new session.
		 */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active scanning is not possible while advertising */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3616
3617 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3618 {
3619 struct pending_cmd *cmd;
3620 int err;
3621
3622 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3623 if (!cmd)
3624 return -ENOENT;
3625
3626 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3627 &hdev->discovery.type, sizeof(hdev->discovery.type));
3628 mgmt_pending_remove(cmd);
3629
3630 return err;
3631 }
3632
3633 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3634 {
3635 BT_DBG("status %d", status);
3636
3637 hci_dev_lock(hdev);
3638
3639 if (status) {
3640 mgmt_stop_discovery_failed(hdev, status);
3641 goto unlock;
3642 }
3643
3644 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3645
3646 unlock:
3647 hci_dev_unlock(hdev);
3648 }
3649
/* Stop Discovery (mgmt command handler).
 *
 * Validates that a discovery procedure is active and that the type in
 * the request matches the ongoing one, then builds an HCI request via
 * hci_stop_discovery() and moves the state machine to
 * DISCOVERY_STOPPING. The final reply to user space is sent from
 * stop_discovery_complete() once the controller has answered, unless
 * no HCI commands were needed, in which case the reply is immediate.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery procedure is running */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Queue whatever HCI commands are needed to stop the current
	 * discovery (inquiry cancel and/or LE scan disable).
	 */
	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3705
3706 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3707 u16 len)
3708 {
3709 struct mgmt_cp_confirm_name *cp = data;
3710 struct inquiry_entry *e;
3711 int err;
3712
3713 BT_DBG("%s", hdev->name);
3714
3715 hci_dev_lock(hdev);
3716
3717 if (!hci_discovery_active(hdev)) {
3718 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3719 MGMT_STATUS_FAILED, &cp->addr,
3720 sizeof(cp->addr));
3721 goto failed;
3722 }
3723
3724 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3725 if (!e) {
3726 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3727 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3728 sizeof(cp->addr));
3729 goto failed;
3730 }
3731
3732 if (cp->name_known) {
3733 e->name_state = NAME_KNOWN;
3734 list_del(&e->list);
3735 } else {
3736 e->name_state = NAME_NEEDED;
3737 hci_inquiry_cache_update_resolve(hdev, e);
3738 }
3739
3740 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3741 sizeof(cp->addr));
3742
3743 failed:
3744 hci_dev_unlock(hdev);
3745 return err;
3746 }
3747
3748 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3749 u16 len)
3750 {
3751 struct mgmt_cp_block_device *cp = data;
3752 u8 status;
3753 int err;
3754
3755 BT_DBG("%s", hdev->name);
3756
3757 if (!bdaddr_type_is_valid(cp->addr.type))
3758 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3759 MGMT_STATUS_INVALID_PARAMS,
3760 &cp->addr, sizeof(cp->addr));
3761
3762 hci_dev_lock(hdev);
3763
3764 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3765 if (err < 0)
3766 status = MGMT_STATUS_FAILED;
3767 else
3768 status = MGMT_STATUS_SUCCESS;
3769
3770 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3771 &cp->addr, sizeof(cp->addr));
3772
3773 hci_dev_unlock(hdev);
3774
3775 return err;
3776 }
3777
3778 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3779 u16 len)
3780 {
3781 struct mgmt_cp_unblock_device *cp = data;
3782 u8 status;
3783 int err;
3784
3785 BT_DBG("%s", hdev->name);
3786
3787 if (!bdaddr_type_is_valid(cp->addr.type))
3788 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3789 MGMT_STATUS_INVALID_PARAMS,
3790 &cp->addr, sizeof(cp->addr));
3791
3792 hci_dev_lock(hdev);
3793
3794 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3795 if (err < 0)
3796 status = MGMT_STATUS_INVALID_PARAMS;
3797 else
3798 status = MGMT_STATUS_SUCCESS;
3799
3800 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3801 &cp->addr, sizeof(cp->addr));
3802
3803 hci_dev_unlock(hdev);
3804
3805 return err;
3806 }
3807
3808 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3809 u16 len)
3810 {
3811 struct mgmt_cp_set_device_id *cp = data;
3812 struct hci_request req;
3813 int err;
3814 __u16 source;
3815
3816 BT_DBG("%s", hdev->name);
3817
3818 source = __le16_to_cpu(cp->source);
3819
3820 if (source > 0x0002)
3821 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3822 MGMT_STATUS_INVALID_PARAMS);
3823
3824 hci_dev_lock(hdev);
3825
3826 hdev->devid_source = source;
3827 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3828 hdev->devid_product = __le16_to_cpu(cp->product);
3829 hdev->devid_version = __le16_to_cpu(cp->version);
3830
3831 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3832
3833 hci_req_init(&req, hdev);
3834 update_eir(&req);
3835 hci_req_run(&req, NULL);
3836
3837 hci_dev_unlock(hdev);
3838
3839 return err;
3840 }
3841
3842 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3843 {
3844 struct cmd_lookup match = { NULL, hdev };
3845
3846 if (status) {
3847 u8 mgmt_err = mgmt_status(status);
3848
3849 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3850 cmd_status_rsp, &mgmt_err);
3851 return;
3852 }
3853
3854 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3855 &match);
3856
3857 new_settings(hdev, match.sk);
3858
3859 if (match.sk)
3860 sock_put(match.sk);
3861 }
3862
/* Set Advertising (mgmt command handler).
 *
 * Toggles LE advertising. When the controller is powered off, the
 * requested value equals the current one, or there are active LE
 * connections, only the HCI_ADVERTISING flag is toggled and a settings
 * response is sent directly; otherwise an HCI request to enable or
 * disable advertising is issued and the reply is deferred to
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support on the controller */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when something changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending advertising or LE toggle would race with this one */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3940
3941 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3942 void *data, u16 len)
3943 {
3944 struct mgmt_cp_set_static_address *cp = data;
3945 int err;
3946
3947 BT_DBG("%s", hdev->name);
3948
3949 if (!lmp_le_capable(hdev))
3950 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3951 MGMT_STATUS_NOT_SUPPORTED);
3952
3953 if (hdev_is_powered(hdev))
3954 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3955 MGMT_STATUS_REJECTED);
3956
3957 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3958 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3959 return cmd_status(sk, hdev->id,
3960 MGMT_OP_SET_STATIC_ADDRESS,
3961 MGMT_STATUS_INVALID_PARAMS);
3962
3963 /* Two most significant bits shall be set */
3964 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3965 return cmd_status(sk, hdev->id,
3966 MGMT_OP_SET_STATIC_ADDRESS,
3967 MGMT_STATUS_INVALID_PARAMS);
3968 }
3969
3970 hci_dev_lock(hdev);
3971
3972 bacpy(&hdev->static_addr, &cp->bdaddr);
3973
3974 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3975
3976 hci_dev_unlock(hdev);
3977
3978 return err;
3979 }
3980
3981 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3982 void *data, u16 len)
3983 {
3984 struct mgmt_cp_set_scan_params *cp = data;
3985 __u16 interval, window;
3986 int err;
3987
3988 BT_DBG("%s", hdev->name);
3989
3990 if (!lmp_le_capable(hdev))
3991 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3992 MGMT_STATUS_NOT_SUPPORTED);
3993
3994 interval = __le16_to_cpu(cp->interval);
3995
3996 if (interval < 0x0004 || interval > 0x4000)
3997 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3998 MGMT_STATUS_INVALID_PARAMS);
3999
4000 window = __le16_to_cpu(cp->window);
4001
4002 if (window < 0x0004 || window > 0x4000)
4003 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4004 MGMT_STATUS_INVALID_PARAMS);
4005
4006 if (window > interval)
4007 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4008 MGMT_STATUS_INVALID_PARAMS);
4009
4010 hci_dev_lock(hdev);
4011
4012 hdev->le_scan_interval = interval;
4013 hdev->le_scan_window = window;
4014
4015 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4016
4017 /* If background scan is running, restart it so new parameters are
4018 * loaded.
4019 */
4020 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4021 hdev->discovery.state == DISCOVERY_STOPPED) {
4022 struct hci_request req;
4023
4024 hci_req_init(&req, hdev);
4025
4026 hci_req_add_le_scan_disable(&req);
4027 hci_req_add_le_passive_scan(&req);
4028
4029 hci_req_run(&req, NULL);
4030 }
4031
4032 hci_dev_unlock(hdev);
4033
4034 return err;
4035 }
4036
4037 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4038 {
4039 struct pending_cmd *cmd;
4040
4041 BT_DBG("status 0x%02x", status);
4042
4043 hci_dev_lock(hdev);
4044
4045 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4046 if (!cmd)
4047 goto unlock;
4048
4049 if (status) {
4050 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4051 mgmt_status(status));
4052 } else {
4053 struct mgmt_mode *cp = cmd->param;
4054
4055 if (cp->val)
4056 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4057 else
4058 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4059
4060 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4061 new_settings(hdev, cmd->sk);
4062 }
4063
4064 mgmt_pending_remove(cmd);
4065
4066 unlock:
4067 hci_dev_unlock(hdev);
4068 }
4069
/* Set Fast Connectable (mgmt command handler).
 *
 * Adjusts the page scan parameters for faster incoming connections.
 * Requires BR/EDR to be enabled, controller version 1.2 or later, and
 * the controller to be powered and connectable. The flag update and
 * the final user-space reply happen in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Page scan parameter tuning needs BR/EDR and at least 1.2 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4134
4135 static void set_bredr_scan(struct hci_request *req)
4136 {
4137 struct hci_dev *hdev = req->hdev;
4138 u8 scan = 0;
4139
4140 /* Ensure that fast connectable is disabled. This function will
4141 * not do anything if the page scan parameters are already what
4142 * they should be.
4143 */
4144 write_fast_connectable(req, false);
4145
4146 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4147 scan |= SCAN_PAGE;
4148 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4149 scan |= SCAN_INQUIRY;
4150
4151 if (scan)
4152 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4153 }
4154
4155 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4156 {
4157 struct pending_cmd *cmd;
4158
4159 BT_DBG("status 0x%02x", status);
4160
4161 hci_dev_lock(hdev);
4162
4163 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4164 if (!cmd)
4165 goto unlock;
4166
4167 if (status) {
4168 u8 mgmt_err = mgmt_status(status);
4169
4170 /* We need to restore the flag if related HCI commands
4171 * failed.
4172 */
4173 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4174
4175 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4176 } else {
4177 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4178 new_settings(hdev, cmd->sk);
4179 }
4180
4181 mgmt_pending_remove(cmd);
4182
4183 unlock:
4184 hci_dev_unlock(hdev);
4185 }
4186
/* Set BR/EDR (mgmt command handler).
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. While powered off only the flags are toggled; while
 * powered on, only enabling is allowed and an HCI request is issued to
 * update scan mode and advertising data, with the final reply deferred
 * to set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, otherwise disabling BR/EDR would leave
	 * the controller with no enabled transport at all.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just confirm the settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR also clears all settings that
			 * only make sense with BR/EDR enabled.
			 */
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4276
/* Set Secure Connections (mgmt command handler).
 *
 * Value 0x00 disables SC, 0x01 enables it and 0x02 enables SC-only
 * mode. When powered off only the flags are toggled and a settings
 * response is sent directly; otherwise the Write Secure Connections
 * Host Support HCI command is issued and the final reply is deferred.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC host support is configured via a BR/EDR HCI command */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both SC and SC-only already match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): HCI_SC_ONLY is updated here, before the HCI
	 * command completes; if the controller later rejects the command
	 * the flag is not rolled back — confirm this is intended.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4364
4365 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4366 void *data, u16 len)
4367 {
4368 struct mgmt_mode *cp = data;
4369 bool changed, use_changed;
4370 int err;
4371
4372 BT_DBG("request for %s", hdev->name);
4373
4374 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4375 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4376 MGMT_STATUS_INVALID_PARAMS);
4377
4378 hci_dev_lock(hdev);
4379
4380 if (cp->val)
4381 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4382 &hdev->dev_flags);
4383 else
4384 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4385 &hdev->dev_flags);
4386
4387 if (cp->val == 0x02)
4388 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4389 &hdev->dev_flags);
4390 else
4391 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4392 &hdev->dev_flags);
4393
4394 if (hdev_is_powered(hdev) && use_changed &&
4395 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4396 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4397 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4398 sizeof(mode), &mode);
4399 }
4400
4401 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4402 if (err < 0)
4403 goto unlock;
4404
4405 if (changed)
4406 err = new_settings(hdev, sk);
4407
4408 unlock:
4409 hci_dev_unlock(hdev);
4410 return err;
4411 }
4412
4413 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4414 u16 len)
4415 {
4416 struct mgmt_cp_set_privacy *cp = cp_data;
4417 bool changed;
4418 int err;
4419
4420 BT_DBG("request for %s", hdev->name);
4421
4422 if (!lmp_le_capable(hdev))
4423 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4424 MGMT_STATUS_NOT_SUPPORTED);
4425
4426 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4427 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4428 MGMT_STATUS_INVALID_PARAMS);
4429
4430 if (hdev_is_powered(hdev))
4431 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4432 MGMT_STATUS_REJECTED);
4433
4434 hci_dev_lock(hdev);
4435
4436 /* If user space supports this command it is also expected to
4437 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4438 */
4439 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4440
4441 if (cp->privacy) {
4442 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4443 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4444 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4445 } else {
4446 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4447 memset(hdev->irk, 0, sizeof(hdev->irk));
4448 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4449 }
4450
4451 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4452 if (err < 0)
4453 goto unlock;
4454
4455 if (changed)
4456 err = new_settings(hdev, sk);
4457
4458 unlock:
4459 hci_dev_unlock(hdev);
4460 return err;
4461 }
4462
4463 static bool irk_is_valid(struct mgmt_irk_info *irk)
4464 {
4465 switch (irk->addr.type) {
4466 case BDADDR_LE_PUBLIC:
4467 return true;
4468
4469 case BDADDR_LE_RANDOM:
4470 /* Two most significant bits shall be set */
4471 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4472 return false;
4473 return true;
4474 }
4475
4476 return false;
4477 }
4478
4479 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4480 u16 len)
4481 {
4482 struct mgmt_cp_load_irks *cp = cp_data;
4483 u16 irk_count, expected_len;
4484 int i, err;
4485
4486 BT_DBG("request for %s", hdev->name);
4487
4488 if (!lmp_le_capable(hdev))
4489 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4490 MGMT_STATUS_NOT_SUPPORTED);
4491
4492 irk_count = __le16_to_cpu(cp->irk_count);
4493
4494 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4495 if (expected_len != len) {
4496 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4497 expected_len, len);
4498 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4499 MGMT_STATUS_INVALID_PARAMS);
4500 }
4501
4502 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4503
4504 for (i = 0; i < irk_count; i++) {
4505 struct mgmt_irk_info *key = &cp->irks[i];
4506
4507 if (!irk_is_valid(key))
4508 return cmd_status(sk, hdev->id,
4509 MGMT_OP_LOAD_IRKS,
4510 MGMT_STATUS_INVALID_PARAMS);
4511 }
4512
4513 hci_dev_lock(hdev);
4514
4515 hci_smp_irks_clear(hdev);
4516
4517 for (i = 0; i < irk_count; i++) {
4518 struct mgmt_irk_info *irk = &cp->irks[i];
4519 u8 addr_type;
4520
4521 if (irk->addr.type == BDADDR_LE_PUBLIC)
4522 addr_type = ADDR_LE_DEV_PUBLIC;
4523 else
4524 addr_type = ADDR_LE_DEV_RANDOM;
4525
4526 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4527 BDADDR_ANY);
4528 }
4529
4530 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4531
4532 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4533
4534 hci_dev_unlock(hdev);
4535
4536 return err;
4537 }
4538
4539 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4540 {
4541 if (key->master != 0x00 && key->master != 0x01)
4542 return false;
4543
4544 switch (key->addr.type) {
4545 case BDADDR_LE_PUBLIC:
4546 return true;
4547
4548 case BDADDR_LE_RANDOM:
4549 /* Two most significant bits shall be set */
4550 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4551 return false;
4552 return true;
4553 }
4554
4555 return false;
4556 }
4557
4558 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4559 void *cp_data, u16 len)
4560 {
4561 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4562 u16 key_count, expected_len;
4563 int i, err;
4564
4565 BT_DBG("request for %s", hdev->name);
4566
4567 if (!lmp_le_capable(hdev))
4568 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4569 MGMT_STATUS_NOT_SUPPORTED);
4570
4571 key_count = __le16_to_cpu(cp->key_count);
4572
4573 expected_len = sizeof(*cp) + key_count *
4574 sizeof(struct mgmt_ltk_info);
4575 if (expected_len != len) {
4576 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4577 expected_len, len);
4578 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4579 MGMT_STATUS_INVALID_PARAMS);
4580 }
4581
4582 BT_DBG("%s key_count %u", hdev->name, key_count);
4583
4584 for (i = 0; i < key_count; i++) {
4585 struct mgmt_ltk_info *key = &cp->keys[i];
4586
4587 if (!ltk_is_valid(key))
4588 return cmd_status(sk, hdev->id,
4589 MGMT_OP_LOAD_LONG_TERM_KEYS,
4590 MGMT_STATUS_INVALID_PARAMS);
4591 }
4592
4593 hci_dev_lock(hdev);
4594
4595 hci_smp_ltks_clear(hdev);
4596
4597 for (i = 0; i < key_count; i++) {
4598 struct mgmt_ltk_info *key = &cp->keys[i];
4599 u8 type, addr_type, authenticated;
4600
4601 if (key->addr.type == BDADDR_LE_PUBLIC)
4602 addr_type = ADDR_LE_DEV_PUBLIC;
4603 else
4604 addr_type = ADDR_LE_DEV_RANDOM;
4605
4606 if (key->master)
4607 type = SMP_LTK;
4608 else
4609 type = SMP_LTK_SLAVE;
4610
4611 switch (key->type) {
4612 case MGMT_LTK_UNAUTHENTICATED:
4613 authenticated = 0x00;
4614 break;
4615 case MGMT_LTK_AUTHENTICATED:
4616 authenticated = 0x01;
4617 break;
4618 default:
4619 continue;
4620 }
4621
4622 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4623 authenticated, key->val, key->enc_size, key->ediv,
4624 key->rand);
4625 }
4626
4627 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4628 NULL, 0);
4629
4630 hci_dev_unlock(hdev);
4631
4632 return err;
4633 }
4634
/* Context passed through mgmt_pending_foreach() to
 * get_conn_info_complete() when answering pending Get Conn Info
 * commands for one connection.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the replies are for */
	bool valid_tx_power;		/* conn's TX power fields are usable */
	u8 mgmt_status;			/* status to report to user space */
};
4640
/* mgmt_pending_foreach() callback: complete one pending Get Conn Info
 * command if it refers to the connection in @data (a struct
 * cmd_conn_lookup). Replies with the connection's cached RSSI and TX
 * power values and drops the reference taken when the command was
 * queued.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer commands queued for this particular connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken in get_conn_info() */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4676
/* HCI request callback for the Read RSSI / Read TX Power request built
 * in get_conn_info(). Works out which connection the request was for
 * and answers every pending Get Conn Info command for it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4734
/* Get Connection Information (mgmt command handler).
 *
 * Returns RSSI and TX power for an existing connection. Values are
 * cached in the hci_conn; when the cache is older than a randomized
 * conn-info age the controller is queried first and the reply is
 * deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Prepare the reply skeleton; it always echoes the address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred reply;
		 * dropped in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4846
/* Completion callback for the HCI_OP_READ_CLOCK request issued by
 * get_clock_info().  Sends the Get Clock Information reply to the
 * socket stored in the matching pending command and drops the
 * connection reference taken when the command was queued.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* Non-zero "which" means the piconet clock of a specific
	 * connection was requested; recover that connection by handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with the connection (or
	 * NULL) as its user data; use it to find the right command.
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure only the address is reported back */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4899
4900 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
4901 u16 len)
4902 {
4903 struct mgmt_cp_get_clock_info *cp = data;
4904 struct mgmt_rp_get_clock_info rp;
4905 struct hci_cp_read_clock hci_cp;
4906 struct pending_cmd *cmd;
4907 struct hci_request req;
4908 struct hci_conn *conn;
4909 int err;
4910
4911 BT_DBG("%s", hdev->name);
4912
4913 memset(&rp, 0, sizeof(rp));
4914 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4915 rp.addr.type = cp->addr.type;
4916
4917 if (cp->addr.type != BDADDR_BREDR)
4918 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4919 MGMT_STATUS_INVALID_PARAMS,
4920 &rp, sizeof(rp));
4921
4922 hci_dev_lock(hdev);
4923
4924 if (!hdev_is_powered(hdev)) {
4925 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4926 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4927 goto unlock;
4928 }
4929
4930 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4931 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4932 &cp->addr.bdaddr);
4933 if (!conn || conn->state != BT_CONNECTED) {
4934 err = cmd_complete(sk, hdev->id,
4935 MGMT_OP_GET_CLOCK_INFO,
4936 MGMT_STATUS_NOT_CONNECTED,
4937 &rp, sizeof(rp));
4938 goto unlock;
4939 }
4940 } else {
4941 conn = NULL;
4942 }
4943
4944 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
4945 if (!cmd) {
4946 err = -ENOMEM;
4947 goto unlock;
4948 }
4949
4950 hci_req_init(&req, hdev);
4951
4952 memset(&hci_cp, 0, sizeof(hci_cp));
4953 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4954
4955 if (conn) {
4956 hci_conn_hold(conn);
4957 cmd->user_data = conn;
4958
4959 hci_cp.handle = cpu_to_le16(conn->handle);
4960 hci_cp.which = 0x01; /* Piconet clock */
4961 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4962 }
4963
4964 err = hci_req_run(&req, get_clock_info_complete);
4965 if (err < 0)
4966 mgmt_pending_remove(cmd);
4967
4968 unlock:
4969 hci_dev_unlock(hdev);
4970 return err;
4971 }
4972
4973 static void device_added(struct sock *sk, struct hci_dev *hdev,
4974 bdaddr_t *bdaddr, u8 type, u8 action)
4975 {
4976 struct mgmt_ev_device_added ev;
4977
4978 bacpy(&ev.addr.bdaddr, bdaddr);
4979 ev.addr.type = type;
4980 ev.action = action;
4981
4982 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
4983 }
4984
/* Handler for MGMT_OP_ADD_DEVICE: register an LE connection parameter
 * entry for the given address, optionally enabling auto-connection
 * (action 0x01), and emit the Device Added event on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only LE address types are accepted, and never BDADDR_ANY */
	if (!bdaddr_type_is_le(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	/* action: 0x00 = background scan only, 0x01 = auto-connect */
	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Map the mgmt address type to the HCI LE address type */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_DISABLED;

	if (hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type, auto_conn,
				hdev->le_conn_min_interval,
				hdev->le_conn_max_interval) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* Notify other mgmt sockets about the new entry */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5035
5036 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5037 bdaddr_t *bdaddr, u8 type)
5038 {
5039 struct mgmt_ev_device_removed ev;
5040
5041 bacpy(&ev.addr.bdaddr, bdaddr);
5042 ev.addr.type = type;
5043
5044 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5045 }
5046
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one LE connection
 * parameter entry, or all of them when the address is BDADDR_ANY
 * (in which case the address type must be 0).
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		u8 addr_type;

		/* A specific address must be of an LE type */
		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Map the mgmt address type to the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* Notify other mgmt sockets about the removal */
		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY clears all entries; type must be 0 then */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5093
/* Dispatch table for incoming management commands.  mgmt_control()
 * indexes this array directly by opcode, so the entry order must match
 * the MGMT_OP_* numbering exactly.  For fixed-size commands the
 * payload length must equal data_len; var_len commands treat data_len
 * as a minimum size.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
};
5154
/* Entry point for management commands arriving on an HCI control
 * socket.  Copies the message from userspace, validates the header,
 * resolves the target controller and dispatches via mgmt_handlers[].
 * Returns the consumed length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, claimed by a user channel
		 * or marked as raw-only are not available over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) ||
		    test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below MGMT_OP_READ_INFO are global and must not
	 * target a controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len commands use data_len as a minimum, fixed commands
	 * require an exact match.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5248
5249 void mgmt_index_added(struct hci_dev *hdev)
5250 {
5251 if (hdev->dev_type != HCI_BREDR)
5252 return;
5253
5254 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5255 }
5256
/* Announce removal of a controller index.  All pending commands for
 * the controller are failed with Invalid Index first; only HCI_BREDR
 * type controllers are handled.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	/* Opcode 0 matches every pending command for this controller */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
5268
/* Re-queue pending LE connections for every stored connection
 * parameter entry marked HCI_AUTO_CONN_ALWAYS (e.g. after power-on).
 *
 * This function requires the caller holds hdev->lock
 */
static void restart_le_auto_conns(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
	}
}
5279
/* Completion callback for the power-on HCI request built by
 * powered_update_hci().  Restarts LE auto-connections, answers all
 * pending Set Powered commands and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
5299
/* Build and run one HCI request that brings the controller in sync
 * with the current dev_flags after powering on (SSP, LE host support,
 * advertising data, auth, scan mode, class, name and EIR).  Returns
 * the result of hci_req_run(); powered_complete() runs on completion.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if requested but not yet set */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Only write authentication enable if it differs from the
	 * controller's current state.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5359
/* Notify mgmt about a power state change of the controller.  On
 * power-on, synchronizing the controller via powered_update_hci() is
 * attempted first; if a request was queued (returns 0) the pending
 * commands are answered from powered_complete() instead.  On
 * power-off, all pending commands are failed and a zero class of
 * device is broadcast if needed.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		/* Request could not be queued; reply synchronously */
		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5394
5395 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5396 {
5397 struct pending_cmd *cmd;
5398 u8 status;
5399
5400 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5401 if (!cmd)
5402 return;
5403
5404 if (err == -ERFKILL)
5405 status = MGMT_STATUS_RFKILLED;
5406 else
5407 status = MGMT_STATUS_FAILED;
5408
5409 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5410
5411 mgmt_pending_remove(cmd);
5412 }
5413
/* Called when the discoverable timeout expires: clear the
 * discoverable flags, restore page-scan-only mode on BR/EDR, refresh
 * class and advertising data, and broadcast the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Page scan only, inquiry scan off */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5444
/* Track a change of the controller's discoverable state coming from
 * HCI and, when the flag actually changed, refresh the advertising
 * data and broadcast new settings.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5481
/* Track a change of the controller's connectable state coming from
 * HCI and broadcast new settings if the flag actually changed.
 */
void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}
5505
5506 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5507 {
5508 /* Powering off may stop advertising - don't let that interfere */
5509 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5510 return;
5511
5512 if (advertising)
5513 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5514 else
5515 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5516 }
5517
/* A Write Scan Enable command failed: fail the pending mgmt commands
 * that correspond to the requested scan bits (page scan maps to Set
 * Connectable, inquiry scan to Set Discoverable).
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
5530
/* Broadcast a New Link Key event for a BR/EDR link key.  store_hint
 * tells userspace whether it should persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
5547
5548 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5549 {
5550 if (ltk->authenticated)
5551 return MGMT_LTK_AUTHENTICATED;
5552
5553 return MGMT_LTK_UNAUTHENTICATED;
5554 }
5555
/* Broadcast a New Long Term Key event.  store_hint is suppressed for
 * keys tied to non-identity (resolvable/non-resolvable random)
 * addresses since they are useless after the address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5593
5594 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5595 {
5596 struct mgmt_ev_new_irk ev;
5597
5598 memset(&ev, 0, sizeof(ev));
5599
5600 /* For identity resolving keys from devices that are already
5601 * using a public address or static random address, do not
5602 * ask for storing this key. The identity resolving key really
5603 * is only mandatory for devices using resovlable random
5604 * addresses.
5605 *
5606 * Storing all identity resolving keys has the downside that
5607 * they will be also loaded on next boot of they system. More
5608 * identity resolving keys, means more time during scanning is
5609 * needed to actually resolve these addresses.
5610 */
5611 if (bacmp(&irk->rpa, BDADDR_ANY))
5612 ev.store_hint = 0x01;
5613 else
5614 ev.store_hint = 0x00;
5615
5616 bacpy(&ev.rpa, &irk->rpa);
5617 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5618 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5619 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5620
5621 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5622 }
5623
/* Broadcast a New CSRK event.  As with LTKs, store_hint is suppressed
 * for keys tied to non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5653
/* Append one EIR field (length byte, type byte, data) at offset
 * eir_len and return the new total length.  No bounds checking is
 * done here; callers must size the eir buffer appropriately.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* The length octet covers the type byte plus the data */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
5664
/* Broadcast a Device Connected event, attaching the remote name and
 * class of device as EIR fields when available.  buf is large enough
 * for the fixed event plus the EIR data (name_len is a u8, so at most
 * 255 bytes of name plus a 5 byte class field).
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class if it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5691
/* mgmt_pending_foreach() callback: complete one pending Disconnect
 * command with success and hand its socket (with an extra reference)
 * back to the caller via @data so the Device Disconnected event can
 * skip that socket.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Caller is responsible for the matching sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5709
/* mgmt_pending_foreach() callback: complete one pending Unpair Device
 * command with success and emit the Device Unpaired event.  @data is
 * the hci_dev the command belongs to.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5726
/* Notify mgmt about a disconnected device: answer pending Disconnect
 * and Unpair Device commands and broadcast the Device Disconnected
 * event (skipping the socket whose Disconnect command triggered it).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only announce connections that mgmt knew about */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the triggering socket in sk (held) */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5768
/* A disconnect attempt failed: answer pending Unpair Device commands
 * and, if a pending Disconnect command matches this address, complete
 * it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5800
/* Broadcast a Connect Failed event.  If a power-off is pending and
 * this was the last connection, the power-off work is kicked
 * immediately.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5826
5827 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5828 {
5829 struct mgmt_ev_pin_code_request ev;
5830
5831 bacpy(&ev.addr.bdaddr, bdaddr);
5832 ev.addr.type = BDADDR_BREDR;
5833 ev.secure = secure;
5834
5835 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5836 }
5837
/* Complete a pending PIN Code Reply command with the translated HCI
 * status of the controller's reply command.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5856
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status of the controller's reply command.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5875
/* Broadcast a User Confirm Request event carrying the numeric
 * comparison value and a hint whether confirmation alone suffices.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
5892
5893 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5894 u8 link_type, u8 addr_type)
5895 {
5896 struct mgmt_ev_user_passkey_request ev;
5897
5898 BT_DBG("%s", hdev->name);
5899
5900 bacpy(&ev.addr.bdaddr, bdaddr);
5901 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5902
5903 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5904 NULL);
5905 }
5906
5907 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5908 u8 link_type, u8 addr_type, u8 status,
5909 u8 opcode)
5910 {
5911 struct pending_cmd *cmd;
5912 struct mgmt_rp_user_confirm_reply rp;
5913 int err;
5914
5915 cmd = mgmt_pending_find(opcode, hdev);
5916 if (!cmd)
5917 return -ENOENT;
5918
5919 bacpy(&rp.addr.bdaddr, bdaddr);
5920 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5921 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5922 &rp, sizeof(rp));
5923
5924 mgmt_pending_remove(cmd);
5925
5926 return err;
5927 }
5928
5929 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5930 u8 link_type, u8 addr_type, u8 status)
5931 {
5932 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5933 status, MGMT_OP_USER_CONFIRM_REPLY);
5934 }
5935
5936 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5937 u8 link_type, u8 addr_type, u8 status)
5938 {
5939 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5940 status,
5941 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5942 }
5943
5944 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5945 u8 link_type, u8 addr_type, u8 status)
5946 {
5947 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5948 status, MGMT_OP_USER_PASSKEY_REPLY);
5949 }
5950
5951 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5952 u8 link_type, u8 addr_type, u8 status)
5953 {
5954 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5955 status,
5956 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5957 }
5958
5959 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5960 u8 link_type, u8 addr_type, u32 passkey,
5961 u8 entered)
5962 {
5963 struct mgmt_ev_passkey_notify ev;
5964
5965 BT_DBG("%s", hdev->name);
5966
5967 bacpy(&ev.addr.bdaddr, bdaddr);
5968 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5969 ev.passkey = __cpu_to_le32(passkey);
5970 ev.entered = entered;
5971
5972 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5973 }
5974
5975 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5976 u8 addr_type, u8 status)
5977 {
5978 struct mgmt_ev_auth_failed ev;
5979
5980 bacpy(&ev.addr.bdaddr, bdaddr);
5981 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5982 ev.status = mgmt_status(status);
5983
5984 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5985 }
5986
5987 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5988 {
5989 struct cmd_lookup match = { NULL, hdev };
5990 bool changed;
5991
5992 if (status) {
5993 u8 mgmt_err = mgmt_status(status);
5994 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5995 cmd_status_rsp, &mgmt_err);
5996 return;
5997 }
5998
5999 if (test_bit(HCI_AUTH, &hdev->flags))
6000 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6001 &hdev->dev_flags);
6002 else
6003 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6004 &hdev->dev_flags);
6005
6006 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6007 &match);
6008
6009 if (changed)
6010 new_settings(hdev, match.sk);
6011
6012 if (match.sk)
6013 sock_put(match.sk);
6014 }
6015
6016 static void clear_eir(struct hci_request *req)
6017 {
6018 struct hci_dev *hdev = req->hdev;
6019 struct hci_cp_write_eir cp;
6020
6021 if (!lmp_ext_inq_capable(hdev))
6022 return;
6023
6024 memset(hdev->eir, 0, sizeof(hdev->eir));
6025
6026 memset(&cp, 0, sizeof(cp));
6027
6028 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6029 }
6030
/* HCI completion handler for Write Simple Pairing Mode. Responds to any
 * pending SET_SSP commands, keeps the SSP_ENABLED/HS_ENABLED flags in
 * sync (High Speed depends on SSP, so it is cleared whenever SSP goes
 * away), emits New Settings on an actual change, and finally rebuilds or
 * clears the EIR data to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed after we had optimistically set the
		 * flag, roll it (and the dependent HS flag) back and let
		 * user space know via New Settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; "changed" must become
		 * true if either flag made a transition.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, (re)publish EIR data — and sync the controller's
	 * debug key mode if requested; with SSP off, EIR must be cleared.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6083
6084 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6085 {
6086 struct cmd_lookup match = { NULL, hdev };
6087 bool changed = false;
6088
6089 if (status) {
6090 u8 mgmt_err = mgmt_status(status);
6091
6092 if (enable) {
6093 if (test_and_clear_bit(HCI_SC_ENABLED,
6094 &hdev->dev_flags))
6095 new_settings(hdev, NULL);
6096 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6097 }
6098
6099 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6100 cmd_status_rsp, &mgmt_err);
6101 return;
6102 }
6103
6104 if (enable) {
6105 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6106 } else {
6107 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6108 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6109 }
6110
6111 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6112 settings_rsp, &match);
6113
6114 if (changed)
6115 new_settings(hdev, match.sk);
6116
6117 if (match.sk)
6118 sock_put(match.sk);
6119 }
6120
6121 static void sk_lookup(struct pending_cmd *cmd, void *data)
6122 {
6123 struct cmd_lookup *match = data;
6124
6125 if (match->sk == NULL) {
6126 match->sk = cmd->sk;
6127 sock_hold(match->sk);
6128 }
6129 }
6130
6131 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6132 u8 status)
6133 {
6134 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6135
6136 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6137 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6138 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6139
6140 if (!status)
6141 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6142 NULL);
6143
6144 if (match.sk)
6145 sock_put(match.sk);
6146 }
6147
/* HCI completion handler for Write Local Name. On success, broadcasts a
 * Local Name Changed event — skipping the originating socket when the
 * change came from a SET_LOCAL_NAME command, and suppressing the event
 * entirely when the write was part of the power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come from mgmt — cache it locally.
		 * (When a command is pending, the command handler owns
		 * updating hdev->dev_name.)
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the socket that issued the pending command, if any */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6174
/* HCI completion handler for reading local OOB pairing data. Answers the
 * pending READ_LOCAL_OOB_DATA command with either the extended response
 * (192- and 256-bit hash/randomizer pairs, when Secure Connections is
 * enabled and the controller provided 256-bit values) or the legacy
 * 192-bit-only response.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* Extended response only when SC is on and the 256-bit
		 * values are actually available.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6221
/* Emit a Device Found event for a device seen during discovery. EIR and
 * scan response data are concatenated into the event's variable-length
 * tail; a Class of Device EIR field is appended if one was reported but
 * not already present in the EIR data. Known IRKs are resolved so the
 * event carries the device's identity address rather than an RPA.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only reported while a discovery is in progress */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address if an IRK matches this device */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append a CoD field (2-byte header + 3-byte class) when the EIR
	 * data didn't already contain one — this is the reserved 5 bytes.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6273
6274 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6275 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6276 {
6277 struct mgmt_ev_device_found *ev;
6278 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6279 u16 eir_len;
6280
6281 ev = (struct mgmt_ev_device_found *) buf;
6282
6283 memset(buf, 0, sizeof(buf));
6284
6285 bacpy(&ev->addr.bdaddr, bdaddr);
6286 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6287 ev->rssi = rssi;
6288
6289 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6290 name_len);
6291
6292 ev->eir_len = cpu_to_le16(eir_len);
6293
6294 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6295 }
6296
6297 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6298 {
6299 struct mgmt_ev_discovering ev;
6300 struct pending_cmd *cmd;
6301
6302 BT_DBG("%s discovering %u", hdev->name, discovering);
6303
6304 if (discovering)
6305 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6306 else
6307 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6308
6309 if (cmd != NULL) {
6310 u8 type = hdev->discovery.type;
6311
6312 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6313 sizeof(type));
6314 mgmt_pending_remove(cmd);
6315 }
6316
6317 memset(&ev, 0, sizeof(ev));
6318 ev.type = hdev->discovery.type;
6319 ev.discovering = discovering;
6320
6321 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6322 }
6323
6324 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6325 {
6326 struct pending_cmd *cmd;
6327 struct mgmt_ev_device_blocked ev;
6328
6329 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6330
6331 bacpy(&ev.addr.bdaddr, bdaddr);
6332 ev.addr.type = type;
6333
6334 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6335 cmd ? cmd->sk : NULL);
6336 }
6337
6338 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6339 {
6340 struct pending_cmd *cmd;
6341 struct mgmt_ev_device_unblocked ev;
6342
6343 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6344
6345 bacpy(&ev.addr.bdaddr, bdaddr);
6346 ev.addr.type = type;
6347
6348 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6349 cmd ? cmd->sk : NULL);
6350 }
6351
6352 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6353 {
6354 BT_DBG("%s status %u", hdev->name, status);
6355
6356 /* Clear the advertising mgmt setting if we failed to re-enable it */
6357 if (status) {
6358 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6359 new_settings(hdev, NULL);
6360 }
6361 }
6362
6363 void mgmt_reenable_advertising(struct hci_dev *hdev)
6364 {
6365 struct hci_request req;
6366
6367 if (hci_conn_num(hdev, LE_LINK) > 0)
6368 return;
6369
6370 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6371 return;
6372
6373 hci_req_init(&req, hdev);
6374 enable_advertising(&req);
6375
6376 /* If this fails we have no option but to let user space know
6377 * that we've disabled advertising.
6378 */
6379 if (hci_req_run(&req, adv_enable_complete) < 0) {
6380 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6381 new_settings(hdev, NULL);
6382 }
6383 }
This page took 0.265773 seconds and 5 git commands to generate.