Bluetooth: Move LE event mask setting into init3 phase
net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

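/* Illustrative note (added by the editor, not part of the original file):
 * once this file is registered via debugfs_create_file("dut_mode", ...)
 * in __hci_init() below, DUT mode can be toggled from user space.
 * Assuming the usual debugfs mount point, a session might look like:
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE to the controller and
 * writing 'N' resets it via HCI_OP_RESET, as dut_mode_write() shows.
 */
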
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

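/* Illustrative note (added by the editor, not part of the original file):
 * synchronous requests are coordinated through a small state machine on
 * hdev->req_status. A waiter sets HCI_REQ_PEND and sleeps on req_wait_q;
 * hci_req_sync_complete() moves the state to HCI_REQ_DONE with the HCI
 * status in req_result, while hci_req_cancel() moves it to
 * HCI_REQ_CANCELED with an errno instead. The waiters in
 * __hci_cmd_sync_ev() and __hci_req_sync() below translate the final
 * state back into a return value.
 */
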
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

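/* Illustrative usage sketch (added by the editor, not part of the
 * original file): a caller can send a command and synchronously wait for
 * its Command Complete event, e.g. reading the controller's address:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... skb->data now holds the command's return parameters ...
 *	kfree_skb(skb);
 *
 * dut_mode_write() earlier in this file uses exactly this pattern.
 */
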
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

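	/* Editorial note (not part of the original file): per this patch's
	 * subject, the LE event mask is now programmed here in the init3
	 * phase. The 0x1f value below is assumed to enable the first five
	 * LE meta events defined by the core specification: LE Connection
	 * Complete, LE Advertising Report, LE Connection Update Complete,
	 * LE Read Remote Used Features Complete and LE Long Term Key
	 * Request.
	 */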
	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

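/* Illustrative note (added by the editor, not part of the original file):
 * the discovery state normally cycles
 *
 *	STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * with mgmt_discovering() notifying user space on entry to FINDING and on
 * the final transition back to STOPPED, as implemented above.
 */
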
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

a8c5fb1a
GP
1905struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1906 bdaddr_t *bdaddr)
1da177e4 1907{
30883512 1908 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1909 struct inquiry_entry *e;
1910
6ed93dc6 1911 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1912
561aafbc
JH
1913 list_for_each_entry(e, &cache->all, all) {
1914 if (!bacmp(&e->data.bdaddr, bdaddr))
1915 return e;
1916 }
1917
1918 return NULL;
1919}
1920
1921struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1922 bdaddr_t *bdaddr)
561aafbc 1923{
30883512 1924 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1925 struct inquiry_entry *e;
1926
6ed93dc6 1927 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1928
1929 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1930 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1931 return e;
1932 }
1933
1934 return NULL;
1da177e4
LT
1935}
1936
30dc78e1 1937struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1938 bdaddr_t *bdaddr,
1939 int state)
30dc78e1
JH
1940{
1941 struct discovery_state *cache = &hdev->discovery;
1942 struct inquiry_entry *e;
1943
6ed93dc6 1944 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1945
1946 list_for_each_entry(e, &cache->resolve, list) {
1947 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1948 return e;
1949 if (!bacmp(&e->data.bdaddr, bdaddr))
1950 return e;
1951 }
1952
1953 return NULL;
1954}
1955
a3d4e20a 1956void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1957 struct inquiry_entry *ie)
a3d4e20a
JH
1958{
1959 struct discovery_state *cache = &hdev->discovery;
1960 struct list_head *pos = &cache->resolve;
1961 struct inquiry_entry *p;
1962
1963 list_del(&ie->list);
1964
1965 list_for_each_entry(p, &cache->resolve, list) {
1966 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1967 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1968 break;
1969 pos = &p->list;
1970 }
1971
1972 list_add(&ie->list, pos);
1973}
1974
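/* The resolve list is kept ordered by signal strength: RSSI values are
 * negative dBm, so a smaller absolute value means a stronger signal.
 * The walk above stops at the first non-pending entry that is weaker
 * than the one being re-inserted, so remote name resolution proceeds
 * strongest-device-first.
 */
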
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

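/* The returned flags feed the mgmt Device Found event (sketch):
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace whether the remote name
 * is still needed, and MGMT_DEV_FOUND_LEGACY_PAIRING marks devices
 * that did not report Secure Simple Pairing support.
 */
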
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

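/* Summary of the decision above (sketch): legacy keys and security
 * mode 3 keys are always stored, debug keys never are, and otherwise
 * a key persists only when at least one side asked for bonding. The
 * auth_type/remote_auth values 0x02 and 0x03 indicate dedicated
 * bonding, while anything above 0x01 rules out a no-bonding request.
 */
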
static bool ltk_type_master(u8 type)
{
	return (type == SMP_LTK);
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

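/* RPA resolution runs in two passes: first a cheap comparison against
 * the last resolvable private address each IRK matched, then the AES
 * based smp_irk_matches() check. A hit in the second pass is cached
 * in irk->rpa so the next lookup for the same address takes the fast
 * path.
 */
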
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

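/* Recovery sketch: cmd_cnt tracks the controller's outstanding-command
 * credit. Forcing it back to 1 after a timeout and re-queueing
 * cmd_work unblocks the command queue, so the next pending command
 * can still be sent even though the controller never acknowledged the
 * stuck one.
 */
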
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

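/* A static random address is identified by its two most significant
 * bits being 11. Since bdaddr_t is stored little endian, b[5] holds
 * the most significant byte, so an address printed as
 * C0:xx:xx:xx:xx:xx, for example, satisfies
 * (addr->b[5] & 0xc0) == 0xc0.
 */
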
/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");

	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	hci_pend_le_conns_clear(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

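/* Deferral note (sketch): set_random_addr() only queues
 * HCI_OP_LE_SET_RANDOM_ADDR. When the update is skipped, the previous
 * address stays in use until hci_update_random_address() below runs
 * again (for instance once the RPA timeout marks HCI_RPA_EXPIRED),
 * so the deferral is self-healing.
 */
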
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

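/* Address selection priority, as implemented above: 1) a resolvable
 * private address when HCI_PRIVACY is set, 2) a freshly generated
 * unresolvable private address when the caller requires privacy
 * anyway, 3) the static random address when forced or when no public
 * address exists, 4) the public address otherwise.
 */
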
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

3836
3837/* Free HCI device */
3838void hci_free_dev(struct hci_dev *hdev)
3839{
9be0dab7
DH
3840 /* will free via device release */
3841 put_device(&hdev->dev);
3842}
3843EXPORT_SYMBOL(hci_free_dev);
3844
1da177e4
LT
3845/* Register HCI device */
3846int hci_register_dev(struct hci_dev *hdev)
3847{
b1b813d4 3848 int id, error;
1da177e4 3849
010666a1 3850 if (!hdev->open || !hdev->close)
1da177e4
LT
3851 return -EINVAL;
3852
08add513
MM
3853 /* Do not allow HCI_AMP devices to register at index 0,
3854 * so the index can be used as the AMP controller ID.
3855 */
3df92b31
SL
3856 switch (hdev->dev_type) {
3857 case HCI_BREDR:
3858 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3859 break;
3860 case HCI_AMP:
3861 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3862 break;
3863 default:
3864 return -EINVAL;
1da177e4 3865 }
8e87d142 3866
3df92b31
SL
3867 if (id < 0)
3868 return id;
3869
1da177e4
LT
3870 sprintf(hdev->name, "hci%d", id);
3871 hdev->id = id;
2d8b3a11
AE
3872
3873 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3874
d8537548
KC
3875 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3876 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3877 if (!hdev->workqueue) {
3878 error = -ENOMEM;
3879 goto err;
3880 }
f48fd9c8 3881
d8537548
KC
3882 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3883 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3884 if (!hdev->req_workqueue) {
3885 destroy_workqueue(hdev->workqueue);
3886 error = -ENOMEM;
3887 goto err;
3888 }
3889
0153e2ec
MH
3890 if (!IS_ERR_OR_NULL(bt_debugfs))
3891 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3892
bdc3e0f1
MH
3893 dev_set_name(&hdev->dev, "%s", hdev->name);
3894
99780a7b
JH
3895 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3896 CRYPTO_ALG_ASYNC);
3897 if (IS_ERR(hdev->tfm_aes)) {
3898 BT_ERR("Unable to create crypto context");
3899 error = PTR_ERR(hdev->tfm_aes);
3900 hdev->tfm_aes = NULL;
3901 goto err_wqueue;
3902 }
3903
bdc3e0f1 3904 error = device_add(&hdev->dev);
33ca954d 3905 if (error < 0)
99780a7b 3906 goto err_tfm;
1da177e4 3907
611b30f7 3908 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3909 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3910 hdev);
611b30f7
MH
3911 if (hdev->rfkill) {
3912 if (rfkill_register(hdev->rfkill) < 0) {
3913 rfkill_destroy(hdev->rfkill);
3914 hdev->rfkill = NULL;
3915 }
3916 }
3917
5e130367
JH
3918 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3919 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3920
a8b2d5c2 3921 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3922 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3923
01cd3404 3924 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3925 /* Assume BR/EDR support until proven otherwise (such as
3926 * through reading supported features during init).
3927 */
3928 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3929 }
ce2be9ac 3930
fcee3377
GP
3931 write_lock(&hci_dev_list_lock);
3932 list_add(&hdev->list, &hci_dev_list);
3933 write_unlock(&hci_dev_list_lock);
3934
fee746b0
MH
3935 /* Devices that are marked for raw-only usage need to set
3936 * the HCI_RAW flag to indicate that only the user channel is
3937 * supported.
3938 */
3939 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3940 set_bit(HCI_RAW, &hdev->flags);
3941
1da177e4 3942 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3943 hci_dev_hold(hdev);
1da177e4 3944
19202573 3945 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3946
1da177e4 3947 return id;
f48fd9c8 3948
99780a7b
JH
3949err_tfm:
3950 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3951err_wqueue:
3952 destroy_workqueue(hdev->workqueue);
6ead1bbc 3953 destroy_workqueue(hdev->req_workqueue);
33ca954d 3954err:
3df92b31 3955 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3956
33ca954d 3957 return error;
1da177e4
LT
3958}
3959EXPORT_SYMBOL(hci_register_dev);
3960
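/* Driver-side sketch, not part of this file: the minimal lifecycle a
 * transport driver follows around hci_alloc_dev()/hci_register_dev().
 * example_open/example_close/example_send are hypothetical callbacks;
 * note that hci_register_dev() rejects an hdev without open and close.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here. */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB; /* assumption: a USB-style example transport */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}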
3961/* Unregister HCI device */
59735631 3962void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3963{
3df92b31 3964 int i, id;
ef222013 3965
c13854ce 3966 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3967
94324962
JH
3968 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3969
3df92b31
SL
3970 id = hdev->id;
3971
f20d09d5 3972 write_lock(&hci_dev_list_lock);
1da177e4 3973 list_del(&hdev->list);
f20d09d5 3974 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3975
3976 hci_dev_do_close(hdev);
3977
cd4c5391 3978 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3979 kfree_skb(hdev->reassembly[i]);
3980
b9b5ef18
GP
3981 cancel_work_sync(&hdev->power_on);
3982
ab81cbf9 3983 if (!test_bit(HCI_INIT, &hdev->flags) &&
fee746b0
MH
3984 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3985 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
09fd0de5 3986 hci_dev_lock(hdev);
744cf19e 3987 mgmt_index_removed(hdev);
09fd0de5 3988 hci_dev_unlock(hdev);
56e5cb86 3989 }
ab81cbf9 3990
2e58ef3e
JH
3991 /* mgmt_index_removed should take care of emptying the
3992 * pending list */
3993 BUG_ON(!list_empty(&hdev->mgmt_pending));
3994
1da177e4
LT
3995 hci_notify(hdev, HCI_DEV_UNREG);
3996
611b30f7
MH
3997 if (hdev->rfkill) {
3998 rfkill_unregister(hdev->rfkill);
3999 rfkill_destroy(hdev->rfkill);
4000 }
4001
99780a7b
JH
4002 if (hdev->tfm_aes)
4003 crypto_free_blkcipher(hdev->tfm_aes);
4004
bdc3e0f1 4005 device_del(&hdev->dev);
147e2d59 4006
0153e2ec
MH
4007 debugfs_remove_recursive(hdev->debugfs);
4008
f48fd9c8 4009 destroy_workqueue(hdev->workqueue);
6ead1bbc 4010 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4011
09fd0de5 4012 hci_dev_lock(hdev);
e2e0cacb 4013 hci_blacklist_clear(hdev);
2aeb9a1a 4014 hci_uuids_clear(hdev);
55ed8ca1 4015 hci_link_keys_clear(hdev);
b899efaf 4016 hci_smp_ltks_clear(hdev);
970c4e46 4017 hci_smp_irks_clear(hdev);
2763eda6 4018 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4019 hci_white_list_clear(hdev);
15819a70 4020 hci_conn_params_clear(hdev);
09fd0de5 4021 hci_dev_unlock(hdev);
e2e0cacb 4022
dc946bd8 4023 hci_dev_put(hdev);
3df92b31
SL
4024
4025 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4026}
4027EXPORT_SYMBOL(hci_unregister_dev);
4028
4029/* Suspend HCI device */
4030int hci_suspend_dev(struct hci_dev *hdev)
4031{
4032 hci_notify(hdev, HCI_DEV_SUSPEND);
4033 return 0;
4034}
4035EXPORT_SYMBOL(hci_suspend_dev);
4036
4037/* Resume HCI device */
4038int hci_resume_dev(struct hci_dev *hdev)
4039{
4040 hci_notify(hdev, HCI_DEV_RESUME);
4041 return 0;
4042}
4043EXPORT_SYMBOL(hci_resume_dev);
4044
76bca880 4045/* Receive frame from HCI drivers */
e1a26170 4046int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4047{
76bca880 4048 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4049 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4050 kfree_skb(skb);
4051 return -ENXIO;
4052 }
4053
d82603c6 4054 /* Incoming skb */
76bca880
MH
4055 bt_cb(skb)->incoming = 1;
4056
4057 /* Time stamp */
4058 __net_timestamp(skb);
4059
76bca880 4060 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4061 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4062
76bca880
MH
4063 return 0;
4064}
4065EXPORT_SYMBOL(hci_recv_frame);
4066
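/* RX sketch, not part of this file: how a driver that already holds a
 * complete HCI event frame might hand it to the core. The flat-buffer
 * interface of example_deliver_event() is an assumption for the example.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}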
33e882a5 4067static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4068 int count, __u8 index)
33e882a5
SS
4069{
4070 int len = 0;
4071 int hlen = 0;
4072 int remain = count;
4073 struct sk_buff *skb;
4074 struct bt_skb_cb *scb;
4075
4076 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4077 index >= NUM_REASSEMBLY)
33e882a5
SS
4078 return -EILSEQ;
4079
4080 skb = hdev->reassembly[index];
4081
4082 if (!skb) {
4083 switch (type) {
4084 case HCI_ACLDATA_PKT:
4085 len = HCI_MAX_FRAME_SIZE;
4086 hlen = HCI_ACL_HDR_SIZE;
4087 break;
4088 case HCI_EVENT_PKT:
4089 len = HCI_MAX_EVENT_SIZE;
4090 hlen = HCI_EVENT_HDR_SIZE;
4091 break;
4092 case HCI_SCODATA_PKT:
4093 len = HCI_MAX_SCO_SIZE;
4094 hlen = HCI_SCO_HDR_SIZE;
4095 break;
4096 }
4097
1e429f38 4098 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4099 if (!skb)
4100 return -ENOMEM;
4101
4102 scb = (void *) skb->cb;
4103 scb->expect = hlen;
4104 scb->pkt_type = type;
4105
33e882a5
SS
4106 hdev->reassembly[index] = skb;
4107 }
4108
4109 while (count) {
4110 scb = (void *) skb->cb;
89bb46d0 4111 len = min_t(uint, scb->expect, count);
33e882a5
SS
4112
4113 memcpy(skb_put(skb, len), data, len);
4114
4115 count -= len;
4116 data += len;
4117 scb->expect -= len;
4118 remain = count;
4119
4120 switch (type) {
4121 case HCI_EVENT_PKT:
4122 if (skb->len == HCI_EVENT_HDR_SIZE) {
4123 struct hci_event_hdr *h = hci_event_hdr(skb);
4124 scb->expect = h->plen;
4125
4126 if (skb_tailroom(skb) < scb->expect) {
4127 kfree_skb(skb);
4128 hdev->reassembly[index] = NULL;
4129 return -ENOMEM;
4130 }
4131 }
4132 break;
4133
4134 case HCI_ACLDATA_PKT:
4135 if (skb->len == HCI_ACL_HDR_SIZE) {
4136 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4137 scb->expect = __le16_to_cpu(h->dlen);
4138
4139 if (skb_tailroom(skb) < scb->expect) {
4140 kfree_skb(skb);
4141 hdev->reassembly[index] = NULL;
4142 return -ENOMEM;
4143 }
4144 }
4145 break;
4146
4147 case HCI_SCODATA_PKT:
4148 if (skb->len == HCI_SCO_HDR_SIZE) {
4149 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4150 scb->expect = h->dlen;
4151
4152 if (skb_tailroom(skb) < scb->expect) {
4153 kfree_skb(skb);
4154 hdev->reassembly[index] = NULL;
4155 return -ENOMEM;
4156 }
4157 }
4158 break;
4159 }
4160
4161 if (scb->expect == 0) {
4162 /* Complete frame */
4163
4164 bt_cb(skb)->pkt_type = type;
e1a26170 4165 hci_recv_frame(hdev, skb);
33e882a5
SS
4166
4167 hdev->reassembly[index] = NULL;
4168 return remain;
4169 }
4170 }
4171
4172 return remain;
4173}
4174
ef222013
MH
4175int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4176{
f39a3c06
SS
4177 int rem = 0;
4178
ef222013
MH
4179 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4180 return -EILSEQ;
4181
da5f6c37 4182 while (count) {
1e429f38 4183 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4184 if (rem < 0)
4185 return rem;
ef222013 4186
f39a3c06
SS
4187 data += (count - rem);
4188 count = rem;
f81c6224 4189 }
ef222013 4190
f39a3c06 4191 return rem;
ef222013
MH
4192}
4193EXPORT_SYMBOL(hci_recv_fragment);
4194
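/* Sketch, not part of this file: a transport that learns the packet
 * type out of band can feed partial payload chunks here; the return
 * value is the number of bytes left over after complete frames were
 * reassembled and delivered. example_rx_chunk() is hypothetical.
 */
static int example_rx_chunk(struct hci_dev *hdev, void *buf, int len)
{
	return hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
}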
99811510
SS
4195#define STREAM_REASSEMBLY 0
4196
4197int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4198{
4199 int type;
4200 int rem = 0;
4201
da5f6c37 4202 while (count) {
99811510
SS
4203 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4204
4205 if (!skb) {
4206 struct { char type; } *pkt;
4207
4208 /* Start of the frame */
4209 pkt = data;
4210 type = pkt->type;
4211
4212 data++;
4213 count--;
4214 } else
4215 type = bt_cb(skb)->pkt_type;
4216
1e429f38 4217 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4218 STREAM_REASSEMBLY);
99811510
SS
4219 if (rem < 0)
4220 return rem;
4221
4222 data += (count - rem);
4223 count = rem;
f81c6224 4224 }
99811510
SS
4225
4226 return rem;
4227}
4228EXPORT_SYMBOL(hci_recv_stream_fragment);
4229
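/* Sketch, not part of this file: H4-style UART drivers push the raw
 * byte stream instead, where every frame starts with its packet-type
 * octet; hci_recv_stream_fragment() strips the type and reassembles.
 * example_uart_rx() is a hypothetical receive hook.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}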
1da177e4
LT
4230/* ---- Interface to upper protocols ---- */
4231
1da177e4
LT
4232int hci_register_cb(struct hci_cb *cb)
4233{
4234 BT_DBG("%p name %s", cb, cb->name);
4235
f20d09d5 4236 write_lock(&hci_cb_list_lock);
1da177e4 4237 list_add(&cb->list, &hci_cb_list);
f20d09d5 4238 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4239
4240 return 0;
4241}
4242EXPORT_SYMBOL(hci_register_cb);
4243
4244int hci_unregister_cb(struct hci_cb *cb)
4245{
4246 BT_DBG("%p name %s", cb, cb->name);
4247
f20d09d5 4248 write_lock(&hci_cb_list_lock);
1da177e4 4249 list_del(&cb->list);
f20d09d5 4250 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4251
4252 return 0;
4253}
4254EXPORT_SYMBOL(hci_unregister_cb);
4255
51086991 4256static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4257{
0d48d939 4258 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4259
cd82e61c
MH
4260 /* Time stamp */
4261 __net_timestamp(skb);
1da177e4 4262
cd82e61c
MH
4263 /* Send copy to monitor */
4264 hci_send_to_monitor(hdev, skb);
4265
4266 if (atomic_read(&hdev->promisc)) {
4267 /* Send copy to the sockets */
470fe1b5 4268 hci_send_to_sock(hdev, skb);
1da177e4
LT
4269 }
4270
4271 /* Get rid of the skb owner prior to sending to the driver. */
4272 skb_orphan(skb);
4273
7bd8f09f 4274 if (hdev->send(hdev, skb) < 0)
51086991 4275 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4276}
4277
3119ae95
JH
4278void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4279{
4280 skb_queue_head_init(&req->cmd_q);
4281 req->hdev = hdev;
5d73e034 4282 req->err = 0;
3119ae95
JH
4283}
4284
4285int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4286{
4287 struct hci_dev *hdev = req->hdev;
4288 struct sk_buff *skb;
4289 unsigned long flags;
4290
4291 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4292
5d73e034
AG
4293 /* If an error occurred during request building, remove all HCI
4294 * commands queued on the HCI request queue.
4295 */
4296 if (req->err) {
4297 skb_queue_purge(&req->cmd_q);
4298 return req->err;
4299 }
4300
3119ae95
JH
4301 /* Do not allow empty requests */
4302 if (skb_queue_empty(&req->cmd_q))
382b0c39 4303 return -ENODATA;
3119ae95
JH
4304
4305 skb = skb_peek_tail(&req->cmd_q);
4306 bt_cb(skb)->req.complete = complete;
4307
4308 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4309 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4310 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4311
4312 queue_work(hdev->workqueue, &hdev->cmd_work);
4313
4314 return 0;
4315}
4316
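/* Sketch, not part of this file: the usual build-and-run pattern for an
 * asynchronous HCI request. example_reset_complete is a hypothetical
 * completion callback matching hci_req_complete_t.
 */
static void example_reset_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_run_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_reset_complete);
}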
1ca3a9d0 4317static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4318 u32 plen, const void *param)
1da177e4
LT
4319{
4320 int len = HCI_COMMAND_HDR_SIZE + plen;
4321 struct hci_command_hdr *hdr;
4322 struct sk_buff *skb;
4323
1da177e4 4324 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4325 if (!skb)
4326 return NULL;
1da177e4
LT
4327
4328 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4329 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4330 hdr->plen = plen;
4331
4332 if (plen)
4333 memcpy(skb_put(skb, plen), param, plen);
4334
4335 BT_DBG("skb len %d", skb->len);
4336
0d48d939 4337 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4338
1ca3a9d0
JH
4339 return skb;
4340}
4341
4342/* Send HCI command */
07dc93dd
JH
4343int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4344 const void *param)
1ca3a9d0
JH
4345{
4346 struct sk_buff *skb;
4347
4348 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4349
4350 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4351 if (!skb) {
4352 BT_ERR("%s no memory for command", hdev->name);
4353 return -ENOMEM;
4354 }
4355
11714b3d
JH
4356 /* Stand-alone HCI commands must be flagged as
4357 * single-command requests.
4358 */
4359 bt_cb(skb)->req.start = true;
4360
1da177e4 4361 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4362 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4363
4364 return 0;
4365}
1da177e4 4366
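/* Sketch, not part of this file: a one-off command sent outside any
 * request, here kicking off a controller reset.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}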
71c76a17 4367/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4368void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4369 const void *param, u8 event)
71c76a17
JH
4370{
4371 struct hci_dev *hdev = req->hdev;
4372 struct sk_buff *skb;
4373
4374 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4375
34739c1e
AG
4376 /* If an error occurred during request building, there is no point in
4377 * queueing the HCI command. We can simply return.
4378 */
4379 if (req->err)
4380 return;
4381
71c76a17
JH
4382 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4383 if (!skb) {
5d73e034
AG
4384 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4385 hdev->name, opcode);
4386 req->err = -ENOMEM;
e348fe6b 4387 return;
71c76a17
JH
4388 }
4389
4390 if (skb_queue_empty(&req->cmd_q))
4391 bt_cb(skb)->req.start = true;
4392
02350a72
JH
4393 bt_cb(skb)->req.event = event;
4394
71c76a17 4395 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4396}
4397
07dc93dd
JH
4398void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4399 const void *param)
02350a72
JH
4400{
4401 hci_req_add_ev(req, opcode, plen, param, 0);
4402}
4403
1da177e4 4404/* Get data from the previously sent command */
a9de9248 4405void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4406{
4407 struct hci_command_hdr *hdr;
4408
4409 if (!hdev->sent_cmd)
4410 return NULL;
4411
4412 hdr = (void *) hdev->sent_cmd->data;
4413
a9de9248 4414 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4415 return NULL;
4416
f0e09510 4417 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4418
4419 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4420}
4421
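/* Sketch, not part of this file: event handlers use hci_sent_cmd_data()
 * to recover the parameters of the command that a Command Complete
 * event acknowledges. example_scan_enable_complete() is hypothetical.
 */
static void example_scan_enable_complete(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s scan enable 0x%2.2x", hdev->name, cp->enable);
}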
4422/* Send ACL data */
4423static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4424{
4425 struct hci_acl_hdr *hdr;
4426 int len = skb->len;
4427
badff6d0
ACM
4428 skb_push(skb, HCI_ACL_HDR_SIZE);
4429 skb_reset_transport_header(skb);
9c70220b 4430 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4431 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4432 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4433}
4434
ee22be7e 4435static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4436 struct sk_buff *skb, __u16 flags)
1da177e4 4437{
ee22be7e 4438 struct hci_conn *conn = chan->conn;
1da177e4
LT
4439 struct hci_dev *hdev = conn->hdev;
4440 struct sk_buff *list;
4441
087bfd99
GP
4442 skb->len = skb_headlen(skb);
4443 skb->data_len = 0;
4444
4445 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4446
4447 switch (hdev->dev_type) {
4448 case HCI_BREDR:
4449 hci_add_acl_hdr(skb, conn->handle, flags);
4450 break;
4451 case HCI_AMP:
4452 hci_add_acl_hdr(skb, chan->handle, flags);
4453 break;
4454 default:
4455 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4456 return;
4457 }
087bfd99 4458
70f23020
AE
4459 list = skb_shinfo(skb)->frag_list;
4460 if (!list) {
1da177e4
LT
4461 /* Non-fragmented */
4462 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4463
73d80deb 4464 skb_queue_tail(queue, skb);
1da177e4
LT
4465 } else {
4466 /* Fragmented */
4467 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4468
4469 skb_shinfo(skb)->frag_list = NULL;
4470
4471 /* Queue all fragments atomically */
af3e6359 4472 spin_lock(&queue->lock);
1da177e4 4473
73d80deb 4474 __skb_queue_tail(queue, skb);
e702112f
AE
4475
4476 flags &= ~ACL_START;
4477 flags |= ACL_CONT;
1da177e4
LT
4478 do {
4479 skb = list; list = list->next;
8e87d142 4480
0d48d939 4481 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4482 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4483
4484 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4485
73d80deb 4486 __skb_queue_tail(queue, skb);
1da177e4
LT
4487 } while (list);
4488
af3e6359 4489 spin_unlock(&queue->lock);
1da177e4 4490 }
73d80deb
LAD
4491}
4492
4493void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4494{
ee22be7e 4495 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4496
f0e09510 4497 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4498
ee22be7e 4499 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4500
3eff45ea 4501 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4502}
1da177e4
LT
4503
4504/* Send SCO data */
0d861d8b 4505void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4506{
4507 struct hci_dev *hdev = conn->hdev;
4508 struct hci_sco_hdr hdr;
4509
4510 BT_DBG("%s len %d", hdev->name, skb->len);
4511
aca3192c 4512 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4513 hdr.dlen = skb->len;
4514
badff6d0
ACM
4515 skb_push(skb, HCI_SCO_HDR_SIZE);
4516 skb_reset_transport_header(skb);
9c70220b 4517 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4518
0d48d939 4519 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4520
1da177e4 4521 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4522 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4523}
1da177e4
LT
4524
4525/* ---- HCI TX task (outgoing data) ---- */
4526
4527/* HCI Connection scheduler */
6039aa73
GP
4528static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4529 int *quote)
1da177e4
LT
4530{
4531 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4532 struct hci_conn *conn = NULL, *c;
abc5de8f 4533 unsigned int num = 0, min = ~0;
1da177e4 4534
8e87d142 4535 /* We don't have to lock the device here. Connections are always
1da177e4 4536 * added and removed with the TX task disabled. */
bf4c6325
GP
4537
4538 rcu_read_lock();
4539
4540 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4541 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4542 continue;
769be974
MH
4543
4544 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4545 continue;
4546
1da177e4
LT
4547 num++;
4548
4549 if (c->sent < min) {
4550 min = c->sent;
4551 conn = c;
4552 }
52087a79
LAD
4553
4554 if (hci_conn_num(hdev, type) == num)
4555 break;
1da177e4
LT
4556 }
4557
bf4c6325
GP
4558 rcu_read_unlock();
4559
1da177e4 4560 if (conn) {
6ed58ec5
VT
4561 int cnt, q;
4562
4563 switch (conn->type) {
4564 case ACL_LINK:
4565 cnt = hdev->acl_cnt;
4566 break;
4567 case SCO_LINK:
4568 case ESCO_LINK:
4569 cnt = hdev->sco_cnt;
4570 break;
4571 case LE_LINK:
4572 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4573 break;
4574 default:
4575 cnt = 0;
4576 BT_ERR("Unknown link type");
4577 }
4578
4579 q = cnt / num;
1da177e4
LT
4580 *quote = q ? q : 1;
4581 } else
4582 *quote = 0;
4583
4584 BT_DBG("conn %p quote %d", conn, *quote);
4585 return conn;
4586}
4587
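/* Worked example, illustrative only: with cnt == 9 free ACL buffers and
 * num == 2 connections holding queued data, the connection picked above
 * (the least recently served one) gets quote = 9 / 2 = 4 packets this
 * round; a zero quotient is rounded up to 1 so progress is always made.
 */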
6039aa73 4588static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4589{
4590 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4591 struct hci_conn *c;
1da177e4 4592
bae1f5d9 4593 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4594
bf4c6325
GP
4595 rcu_read_lock();
4596
1da177e4 4597 /* Kill stalled connections */
bf4c6325 4598 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4599 if (c->type == type && c->sent) {
6ed93dc6
AE
4600 BT_ERR("%s killing stalled connection %pMR",
4601 hdev->name, &c->dst);
bed71748 4602 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4603 }
4604 }
bf4c6325
GP
4605
4606 rcu_read_unlock();
1da177e4
LT
4607}
4608
6039aa73
GP
4609static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4610 int *quote)
1da177e4 4611{
73d80deb
LAD
4612 struct hci_conn_hash *h = &hdev->conn_hash;
4613 struct hci_chan *chan = NULL;
abc5de8f 4614 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4615 struct hci_conn *conn;
73d80deb
LAD
4616 int cnt, q, conn_num = 0;
4617
4618 BT_DBG("%s", hdev->name);
4619
bf4c6325
GP
4620 rcu_read_lock();
4621
4622 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4623 struct hci_chan *tmp;
4624
4625 if (conn->type != type)
4626 continue;
4627
4628 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4629 continue;
4630
4631 conn_num++;
4632
8192edef 4633 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4634 struct sk_buff *skb;
4635
4636 if (skb_queue_empty(&tmp->data_q))
4637 continue;
4638
4639 skb = skb_peek(&tmp->data_q);
4640 if (skb->priority < cur_prio)
4641 continue;
4642
4643 if (skb->priority > cur_prio) {
4644 num = 0;
4645 min = ~0;
4646 cur_prio = skb->priority;
4647 }
4648
4649 num++;
4650
4651 if (conn->sent < min) {
4652 min = conn->sent;
4653 chan = tmp;
4654 }
4655 }
4656
4657 if (hci_conn_num(hdev, type) == conn_num)
4658 break;
4659 }
4660
bf4c6325
GP
4661 rcu_read_unlock();
4662
73d80deb
LAD
4663 if (!chan)
4664 return NULL;
4665
4666 switch (chan->conn->type) {
4667 case ACL_LINK:
4668 cnt = hdev->acl_cnt;
4669 break;
bd1eb66b
AE
4670 case AMP_LINK:
4671 cnt = hdev->block_cnt;
4672 break;
73d80deb
LAD
4673 case SCO_LINK:
4674 case ESCO_LINK:
4675 cnt = hdev->sco_cnt;
4676 break;
4677 case LE_LINK:
4678 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4679 break;
4680 default:
4681 cnt = 0;
4682 BT_ERR("Unknown link type");
4683 }
4684
4685 q = cnt / num;
4686 *quote = q ? q : 1;
4687 BT_DBG("chan %p quote %d", chan, *quote);
4688 return chan;
4689}
4690
02b20f0b
LAD
4691static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4692{
4693 struct hci_conn_hash *h = &hdev->conn_hash;
4694 struct hci_conn *conn;
4695 int num = 0;
4696
4697 BT_DBG("%s", hdev->name);
4698
bf4c6325
GP
4699 rcu_read_lock();
4700
4701 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4702 struct hci_chan *chan;
4703
4704 if (conn->type != type)
4705 continue;
4706
4707 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4708 continue;
4709
4710 num++;
4711
8192edef 4712 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4713 struct sk_buff *skb;
4714
4715 if (chan->sent) {
4716 chan->sent = 0;
4717 continue;
4718 }
4719
4720 if (skb_queue_empty(&chan->data_q))
4721 continue;
4722
4723 skb = skb_peek(&chan->data_q);
4724 if (skb->priority >= HCI_PRIO_MAX - 1)
4725 continue;
4726
4727 skb->priority = HCI_PRIO_MAX - 1;
4728
4729 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4730 skb->priority);
02b20f0b
LAD
4731 }
4732
4733 if (hci_conn_num(hdev, type) == num)
4734 break;
4735 }
bf4c6325
GP
4736
4737 rcu_read_unlock();
4738
02b20f0b
LAD
4739}
4740
b71d385a
AE
4741static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4742{
4743 /* Calculate the number of blocks used by this packet */
4744 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4745}
4746
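/* Worked example, illustrative only: for an skb of 1028 bytes (4-byte
 * ACL header plus 1024 bytes of payload) on a controller reporting
 * hdev->block_len == 256, __get_blocks() returns
 * DIV_ROUND_UP(1024, 256) == 4 data blocks.
 */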
6039aa73 4747static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4748{
fee746b0 4749 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
1da177e4
LT
4750 /* The ACL tx timeout must be longer than the maximum
4751 * link supervision timeout (40.9 seconds). */
63d2bc1b 4752 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4753 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4754 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4755 }
63d2bc1b 4756}
1da177e4 4757
6039aa73 4758static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4759{
4760 unsigned int cnt = hdev->acl_cnt;
4761 struct hci_chan *chan;
4762 struct sk_buff *skb;
4763 int quote;
4764
4765 __check_timeout(hdev, cnt);
04837f64 4766
73d80deb 4767 while (hdev->acl_cnt &&
a8c5fb1a 4768 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4769 u32 priority = (skb_peek(&chan->data_q))->priority;
4770 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4771 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4772 skb->len, skb->priority);
73d80deb 4773
ec1cce24
LAD
4774 /* Stop if priority has changed */
4775 if (skb->priority < priority)
4776 break;
4777
4778 skb = skb_dequeue(&chan->data_q);
4779
73d80deb 4780 hci_conn_enter_active_mode(chan->conn,
04124681 4781 bt_cb(skb)->force_active);
04837f64 4782
57d17d70 4783 hci_send_frame(hdev, skb);
1da177e4
LT
4784 hdev->acl_last_tx = jiffies;
4785
4786 hdev->acl_cnt--;
73d80deb
LAD
4787 chan->sent++;
4788 chan->conn->sent++;
1da177e4
LT
4789 }
4790 }
02b20f0b
LAD
4791
4792 if (cnt != hdev->acl_cnt)
4793 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4794}
4795
6039aa73 4796static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4797{
63d2bc1b 4798 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4799 struct hci_chan *chan;
4800 struct sk_buff *skb;
4801 int quote;
bd1eb66b 4802 u8 type;
b71d385a 4803
63d2bc1b 4804 __check_timeout(hdev, cnt);
b71d385a 4805
bd1eb66b
AE
4806 BT_DBG("%s", hdev->name);
4807
4808 if (hdev->dev_type == HCI_AMP)
4809 type = AMP_LINK;
4810 else
4811 type = ACL_LINK;
4812
b71d385a 4813 while (hdev->block_cnt > 0 &&
bd1eb66b 4814 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4815 u32 priority = (skb_peek(&chan->data_q))->priority;
4816 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4817 int blocks;
4818
4819 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4820 skb->len, skb->priority);
b71d385a
AE
4821
4822 /* Stop if priority has changed */
4823 if (skb->priority < priority)
4824 break;
4825
4826 skb = skb_dequeue(&chan->data_q);
4827
4828 blocks = __get_blocks(hdev, skb);
4829 if (blocks > hdev->block_cnt)
4830 return;
4831
4832 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4833 bt_cb(skb)->force_active);
b71d385a 4834
57d17d70 4835 hci_send_frame(hdev, skb);
b71d385a
AE
4836 hdev->acl_last_tx = jiffies;
4837
4838 hdev->block_cnt -= blocks;
4839 quote -= blocks;
4840
4841 chan->sent += blocks;
4842 chan->conn->sent += blocks;
4843 }
4844 }
4845
4846 if (cnt != hdev->block_cnt)
bd1eb66b 4847 hci_prio_recalculate(hdev, type);
b71d385a
AE
4848}
4849
6039aa73 4850static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4851{
4852 BT_DBG("%s", hdev->name);
4853
bd1eb66b
AE
4854 /* No ACL link over BR/EDR controller */
4855 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4856 return;
4857
4858 /* No AMP link over AMP controller */
4859 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4860 return;
4861
4862 switch (hdev->flow_ctl_mode) {
4863 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4864 hci_sched_acl_pkt(hdev);
4865 break;
4866
4867 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4868 hci_sched_acl_blk(hdev);
4869 break;
4870 }
4871}
4872
1da177e4 4873/* Schedule SCO */
6039aa73 4874static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4875{
4876 struct hci_conn *conn;
4877 struct sk_buff *skb;
4878 int quote;
4879
4880 BT_DBG("%s", hdev->name);
4881
52087a79
LAD
4882 if (!hci_conn_num(hdev, SCO_LINK))
4883 return;
4884
1da177e4
LT
4885 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4886 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4887 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4888 hci_send_frame(hdev, skb);
1da177e4
LT
4889
4890 conn->sent++;
4891 if (conn->sent == ~0)
4892 conn->sent = 0;
4893 }
4894 }
4895}
4896
6039aa73 4897static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4898{
4899 struct hci_conn *conn;
4900 struct sk_buff *skb;
4901 int quote;
4902
4903 BT_DBG("%s", hdev->name);
4904
52087a79
LAD
4905 if (!hci_conn_num(hdev, ESCO_LINK))
4906 return;
4907
8fc9ced3
GP
4908 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4909 &quote))) {
b6a0dc82
MH
4910 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4911 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4912 hci_send_frame(hdev, skb);
b6a0dc82
MH
4913
4914 conn->sent++;
4915 if (conn->sent == ~0)
4916 conn->sent = 0;
4917 }
4918 }
4919}
4920
6039aa73 4921static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4922{
73d80deb 4923 struct hci_chan *chan;
6ed58ec5 4924 struct sk_buff *skb;
02b20f0b 4925 int quote, cnt, tmp;
6ed58ec5
VT
4926
4927 BT_DBG("%s", hdev->name);
4928
52087a79
LAD
4929 if (!hci_conn_num(hdev, LE_LINK))
4930 return;
4931
fee746b0 4932 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
6ed58ec5
VT
4933 /* The LE tx timeout must be longer than the maximum
4934 * link supervision timeout (40.9 seconds). */
bae1f5d9 4935 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4936 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4937 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4938 }
4939
4940 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4941 tmp = cnt;
73d80deb 4942 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4943 u32 priority = (skb_peek(&chan->data_q))->priority;
4944 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4945 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4946 skb->len, skb->priority);
6ed58ec5 4947
ec1cce24
LAD
4948 /* Stop if priority has changed */
4949 if (skb->priority < priority)
4950 break;
4951
4952 skb = skb_dequeue(&chan->data_q);
4953
57d17d70 4954 hci_send_frame(hdev, skb);
6ed58ec5
VT
4955 hdev->le_last_tx = jiffies;
4956
4957 cnt--;
73d80deb
LAD
4958 chan->sent++;
4959 chan->conn->sent++;
6ed58ec5
VT
4960 }
4961 }
73d80deb 4962
6ed58ec5
VT
4963 if (hdev->le_pkts)
4964 hdev->le_cnt = cnt;
4965 else
4966 hdev->acl_cnt = cnt;
02b20f0b
LAD
4967
4968 if (cnt != tmp)
4969 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4970}
4971
3eff45ea 4972static void hci_tx_work(struct work_struct *work)
1da177e4 4973{
3eff45ea 4974 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4975 struct sk_buff *skb;
4976
6ed58ec5 4977 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4978 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4979
52de599e
MH
4980 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4981 /* Schedule queues and send stuff to HCI driver */
4982 hci_sched_acl(hdev);
4983 hci_sched_sco(hdev);
4984 hci_sched_esco(hdev);
4985 hci_sched_le(hdev);
4986 }
6ed58ec5 4987
1da177e4
LT
4988 /* Send next queued raw (unknown type) packet */
4989 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4990 hci_send_frame(hdev, skb);
1da177e4
LT
4991}
4992
25985edc 4993/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4994
4995/* ACL data packet */
6039aa73 4996static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4997{
4998 struct hci_acl_hdr *hdr = (void *) skb->data;
4999 struct hci_conn *conn;
5000 __u16 handle, flags;
5001
5002 skb_pull(skb, HCI_ACL_HDR_SIZE);
5003
5004 handle = __le16_to_cpu(hdr->handle);
5005 flags = hci_flags(handle);
5006 handle = hci_handle(handle);
5007
f0e09510 5008 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5009 handle, flags);
1da177e4
LT
5010
5011 hdev->stat.acl_rx++;
5012
5013 hci_dev_lock(hdev);
5014 conn = hci_conn_hash_lookup_handle(hdev, handle);
5015 hci_dev_unlock(hdev);
8e87d142 5016
1da177e4 5017 if (conn) {
65983fc7 5018 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5019
1da177e4 5020 /* Send to upper protocol */
686ebf28
UF
5021 l2cap_recv_acldata(conn, skb, flags);
5022 return;
1da177e4 5023 } else {
8e87d142 5024 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5025 hdev->name, handle);
1da177e4
LT
5026 }
5027
5028 kfree_skb(skb);
5029}
5030
5031/* SCO data packet */
6039aa73 5032static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5033{
5034 struct hci_sco_hdr *hdr = (void *) skb->data;
5035 struct hci_conn *conn;
5036 __u16 handle;
5037
5038 skb_pull(skb, HCI_SCO_HDR_SIZE);
5039
5040 handle = __le16_to_cpu(hdr->handle);
5041
f0e09510 5042 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5043
5044 hdev->stat.sco_rx++;
5045
5046 hci_dev_lock(hdev);
5047 conn = hci_conn_hash_lookup_handle(hdev, handle);
5048 hci_dev_unlock(hdev);
5049
5050 if (conn) {
1da177e4 5051 /* Send to upper protocol */
686ebf28
UF
5052 sco_recv_scodata(conn, skb);
5053 return;
1da177e4 5054 } else {
8e87d142 5055 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5056 hdev->name, handle);
1da177e4
LT
5057 }
5058
5059 kfree_skb(skb);
5060}
5061
9238f36a
JH
5062static bool hci_req_is_complete(struct hci_dev *hdev)
5063{
5064 struct sk_buff *skb;
5065
5066 skb = skb_peek(&hdev->cmd_q);
5067 if (!skb)
5068 return true;
5069
5070 return bt_cb(skb)->req.start;
5071}
5072
42c6b129
JH
5073static void hci_resend_last(struct hci_dev *hdev)
5074{
5075 struct hci_command_hdr *sent;
5076 struct sk_buff *skb;
5077 u16 opcode;
5078
5079 if (!hdev->sent_cmd)
5080 return;
5081
5082 sent = (void *) hdev->sent_cmd->data;
5083 opcode = __le16_to_cpu(sent->opcode);
5084 if (opcode == HCI_OP_RESET)
5085 return;
5086
5087 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5088 if (!skb)
5089 return;
5090
5091 skb_queue_head(&hdev->cmd_q, skb);
5092 queue_work(hdev->workqueue, &hdev->cmd_work);
5093}
5094
9238f36a
JH
5095void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5096{
5097 hci_req_complete_t req_complete = NULL;
5098 struct sk_buff *skb;
5099 unsigned long flags;
5100
5101 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5102
42c6b129
JH
5103 /* If the completed command doesn't match the last one that was
5104 * sent we need to do special handling of it.
9238f36a 5105 */
42c6b129
JH
5106 if (!hci_sent_cmd_data(hdev, opcode)) {
5107 /* Some CSR based controllers generate a spontaneous
5108 * reset complete event during init and any pending
5109 * command will never be completed. In such a case we
5110 * need to resend whatever was the last sent
5111 * command.
5112 */
5113 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5114 hci_resend_last(hdev);
5115
9238f36a 5116 return;
42c6b129 5117 }
9238f36a
JH
5118
5119 /* If the command succeeded and there's still more commands in
5120 * this request the request is not yet complete.
5121 */
5122 if (!status && !hci_req_is_complete(hdev))
5123 return;
5124
5125 /* If this was the last command in a request the complete
5126 * callback would be found in hdev->sent_cmd instead of the
5127 * command queue (hdev->cmd_q).
5128 */
5129 if (hdev->sent_cmd) {
5130 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5131
5132 if (req_complete) {
5133 /* We must set the complete callback to NULL to
5134 * avoid calling the callback more than once if
5135 * this function gets called again.
5136 */
5137 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5138
9238f36a 5139 goto call_complete;
53e21fbc 5140 }
9238f36a
JH
5141 }
5142
5143 /* Remove all pending commands belonging to this request */
5144 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5145 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5146 if (bt_cb(skb)->req.start) {
5147 __skb_queue_head(&hdev->cmd_q, skb);
5148 break;
5149 }
5150
5151 req_complete = bt_cb(skb)->req.complete;
5152 kfree_skb(skb);
5153 }
5154 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5155
5156call_complete:
5157 if (req_complete)
5158 req_complete(hdev, status);
5159}
5160
b78752cc 5161static void hci_rx_work(struct work_struct *work)
1da177e4 5162{
b78752cc 5163 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5164 struct sk_buff *skb;
5165
5166 BT_DBG("%s", hdev->name);
5167
1da177e4 5168 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5169 /* Send copy to monitor */
5170 hci_send_to_monitor(hdev, skb);
5171
1da177e4
LT
5172 if (atomic_read(&hdev->promisc)) {
5173 /* Send copy to the sockets */
470fe1b5 5174 hci_send_to_sock(hdev, skb);
1da177e4
LT
5175 }
5176
fee746b0 5177 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5178 kfree_skb(skb);
5179 continue;
5180 }
5181
5182 if (test_bit(HCI_INIT, &hdev->flags)) {
5183 /* Don't process data packets in this state. */
0d48d939 5184 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5185 case HCI_ACLDATA_PKT:
5186 case HCI_SCODATA_PKT:
5187 kfree_skb(skb);
5188 continue;
3ff50b79 5189 }
1da177e4
LT
5190 }
5191
5192 /* Process frame */
0d48d939 5193 switch (bt_cb(skb)->pkt_type) {
1da177e4 5194 case HCI_EVENT_PKT:
b78752cc 5195 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5196 hci_event_packet(hdev, skb);
5197 break;
5198
5199 case HCI_ACLDATA_PKT:
5200 BT_DBG("%s ACL data packet", hdev->name);
5201 hci_acldata_packet(hdev, skb);
5202 break;
5203
5204 case HCI_SCODATA_PKT:
5205 BT_DBG("%s SCO data packet", hdev->name);
5206 hci_scodata_packet(hdev, skb);
5207 break;
5208
5209 default:
5210 kfree_skb(skb);
5211 break;
5212 }
5213 }
1da177e4
LT
5214}
5215
c347b765 5216static void hci_cmd_work(struct work_struct *work)
1da177e4 5217{
c347b765 5218 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5219 struct sk_buff *skb;
5220
2104786b
AE
5221 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5222 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5223
1da177e4 5224 /* Send queued commands */
5a08ecce
AE
5225 if (atomic_read(&hdev->cmd_cnt)) {
5226 skb = skb_dequeue(&hdev->cmd_q);
5227 if (!skb)
5228 return;
5229
7585b97a 5230 kfree_skb(hdev->sent_cmd);
1da177e4 5231
a675d7f1 5232 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5233 if (hdev->sent_cmd) {
1da177e4 5234 atomic_dec(&hdev->cmd_cnt);
57d17d70 5235 hci_send_frame(hdev, skb);
7bdb8a5c 5236 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5237 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5238 else
65cc2b49
MH
5239 schedule_delayed_work(&hdev->cmd_timer,
5240 HCI_CMD_TIMEOUT);
1da177e4
LT
5241 } else {
5242 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5243 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5244 }
5245 }
5246}
b1efcc28
AG
5247
5248void hci_req_add_le_scan_disable(struct hci_request *req)
5249{
5250 struct hci_cp_le_set_scan_enable cp;
5251
5252 memset(&cp, 0, sizeof(cp));
5253 cp.enable = LE_SCAN_DISABLE;
5254 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5255}
a4790dbd 5256
8ef30fd3
AG
5257void hci_req_add_le_passive_scan(struct hci_request *req)
5258{
5259 struct hci_cp_le_set_scan_param param_cp;
5260 struct hci_cp_le_set_scan_enable enable_cp;
5261 struct hci_dev *hdev = req->hdev;
5262 u8 own_addr_type;
5263
6ab535a7
MH
5264 /* Set require_privacy to false since no SCAN_REQ are sent
5265 * during passive scanning. Not using an unresolvable address
5266 * here is important so that peer devices using direct
5267 * advertising with our address will be correctly reported
5268 * by the controller.
8ef30fd3 5269 */
6ab535a7 5270 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5271 return;
5272
5273 memset(&param_cp, 0, sizeof(param_cp));
5274 param_cp.type = LE_SCAN_PASSIVE;
5275 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5276 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5277 param_cp.own_address_type = own_addr_type;
5278 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5279 &param_cp);
5280
5281 memset(&enable_cp, 0, sizeof(enable_cp));
5282 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5283 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5284 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5285 &enable_cp);
5286}
5287
a4790dbd
AG
5288static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5289{
5290 if (status)
5291 BT_DBG("HCI request failed to update background scanning: "
5292 "status 0x%2.2x", status);
5293}
5294
5295/* This function controls the background scanning based on the
5296 * hdev->pend_le_conns list. If there are pending LE connections we
5297 * start the background scanning, otherwise we stop it.
5298 *
5299 * This function requires that the caller holds hdev->lock.
5300 */
5301void hci_update_background_scan(struct hci_dev *hdev)
5302{
a4790dbd
AG
5303 struct hci_request req;
5304 struct hci_conn *conn;
5305 int err;
5306
c20c02d5
MH
5307 if (!test_bit(HCI_UP, &hdev->flags) ||
5308 test_bit(HCI_INIT, &hdev->flags) ||
5309 test_bit(HCI_SETUP, &hdev->dev_flags) ||
b8221770 5310 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5311 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5312 return;
5313
a4790dbd
AG
5314 hci_req_init(&req, hdev);
5315
5316 if (list_empty(&hdev->pend_le_conns)) {
5317 /* If there are no pending LE connections, we should stop
5318 * the background scanning.
5319 */
5320
5321 /* If controller is not scanning we are done. */
5322 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5323 return;
5324
5325 hci_req_add_le_scan_disable(&req);
5326
5327 BT_DBG("%s stopping background scanning", hdev->name);
5328 } else {
a4790dbd
AG
5329 /* If there is at least one pending LE connection, we should
5330 * keep the background scan running.
5331 */
5332
a4790dbd
AG
5333 /* If controller is connecting, we should not start scanning
5334 * since some controllers are not able to scan and connect at
5335 * the same time.
5336 */
5337 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5338 if (conn)
5339 return;
5340
4340a124
AG
5341 /* If controller is currently scanning, we stop it to ensure we
5342 * don't miss any advertising (due to duplicates filter).
5343 */
5344 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5345 hci_req_add_le_scan_disable(&req);
5346
8ef30fd3 5347 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5348
5349 BT_DBG("%s starting background scanning", hdev->name);
5350 }
5351
5352 err = hci_req_run(&req, update_background_scan_complete);
5353 if (err)
5354 BT_ERR("Failed to run HCI request: err %d", err);
5355}