Bluetooth: Convert IRK list to RCU
net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
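
/* The show function above walks the IRK list under rcu_read_lock() only,
 * which is the point of the "Convert IRK list to RCU" change: lookups no
 * longer need hdev->lock. That is only safe if the writer side publishes
 * and retires entries with the RCU list primitives. A minimal sketch of
 * the assumed writer pattern follows; the real updates live in the
 * hci_add_irk()/hci_remove_irk() hunks of this patch, and the sketch
 * assumes struct smp_irk carries a struct rcu_head member named "rcu".
 */
#if 0 /* illustrative sketch only, not compiled */
static void sketch_add_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	/* Writers still serialize against each other via hdev->lock;
	 * list_add_rcu() publishes the entry for lockless readers.
	 */
	list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
}

static void sketch_remove_irk(struct smp_irk *irk)
{
	/* Unlink first, then free only after all pre-existing RCU
	 * readers have left their read-side critical sections.
	 */
	list_del_rcu(&irk->list);
	kfree_rcu(irk, rcu);
}
#endif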

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
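
/* __hci_cmd_sync() above sends a single command and sleeps until the
 * matching Command Complete event arrives, returning that event's skb.
 * A minimal caller sketch (the opcode is real; the wrapper function is
 * hypothetical and mirrors the pattern used by dut_mode_write() above):
 */
#if 0 /* illustrative sketch only, not compiled */
static int sketch_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data holds the command's return parameters, beginning
	 * with the status byte.
	 */
	kfree_skb(skb);
	return 0;
}
#endif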

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
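
/* hci_req_sync() pairs a request-builder callback with the synchronous
 * wait implemented above. The builders that follow (hci_scan_req,
 * hci_auth_req, ...) only queue commands; a caller drives them roughly
 * like this (sketch modeled on the HCISETSCAN ioctl path; the wrapper
 * function itself is hypothetical):
 */
#if 0 /* illustrative sketch only, not compiled */
static int sketch_set_scan(struct hci_dev *hdev, __u8 scan_mode)
{
	/* Queues HCI_OP_WRITE_SCAN_ENABLE and waits for its completion */
	return hci_req_sync(hdev, hci_scan_req, scan_mode, HCI_INIT_TIMEOUT);
}
#endif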

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However, some controllers list max_page
		 * as 0 as long as SSP has not been enabled. To achieve
		 * proper debugging output, force max_page to at least 1.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send it only if the command is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
0ebca7d6
MH
1885static void hci_init0_req(struct hci_request *req, unsigned long opt)
1886{
1887 struct hci_dev *hdev = req->hdev;
1888
1889 BT_DBG("%s %ld", hdev->name, opt);
1890
1891 /* Reset */
1892 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1893 hci_reset_req(req, 0);
1894
1895 /* Read Local Version */
1896 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1897
1898 /* Read BD Address */
1899 if (hdev->set_bdaddr)
1900 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1901}
1902
1903static int __hci_unconf_init(struct hci_dev *hdev)
1904{
1905 int err;
1906
cc78b44b
MH
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908 return 0;
1909
0ebca7d6
MH
1910 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1911 if (err < 0)
1912 return err;
1913
1914 return 0;
1915}
1916
42c6b129 1917static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1918{
1919 __u8 scan = opt;
1920
42c6b129 1921 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1922
1923 /* Inquiry and Page scans */
42c6b129 1924 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1925}
1926
42c6b129 1927static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1928{
1929 __u8 auth = opt;
1930
42c6b129 1931 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1932
1933 /* Authentication */
42c6b129 1934 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1935}
1936
42c6b129 1937static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1938{
1939 __u8 encrypt = opt;
1940
42c6b129 1941 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1942
e4e8e37c 1943 /* Encryption */
42c6b129 1944 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1945}
1946
42c6b129 1947static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1948{
1949 __le16 policy = cpu_to_le16(opt);
1950
42c6b129 1951 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1952
1953 /* Default link policy */
42c6b129 1954 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1955}
1956
8e87d142 1957/* Get HCI device by index.
1da177e4
LT
1958 * Device is held on return. */
1959struct hci_dev *hci_dev_get(int index)
1960{
8035ded4 1961 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1962
1963 BT_DBG("%d", index);
1964
1965 if (index < 0)
1966 return NULL;
1967
1968 read_lock(&hci_dev_list_lock);
8035ded4 1969 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1970 if (d->id == index) {
1971 hdev = hci_dev_hold(d);
1972 break;
1973 }
1974 }
1975 read_unlock(&hci_dev_list_lock);
1976 return hdev;
1977}
1da177e4
LT
1978
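/* A minimal sketch (not from the original file; example_query_dev() is a
 * hypothetical caller): the reference discipline hci_dev_get() imposes.
 * Every successful lookup must be balanced by hci_dev_put().
 */
static int example_query_dev(int index)
{
	struct hci_dev *hdev;

	hdev = hci_dev_get(index);	/* takes a reference on success */
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s is %s", hdev->name,
	       test_bit(HCI_UP, &hdev->flags) ? "up" : "down");

	hci_dev_put(hdev);		/* release the reference */
	return 0;
}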
1979/* ---- Inquiry support ---- */
ff9ef578 1980
30dc78e1
JH
1981bool hci_discovery_active(struct hci_dev *hdev)
1982{
1983 struct discovery_state *discov = &hdev->discovery;
1984
6fbe195d 1985 switch (discov->state) {
343f935b 1986 case DISCOVERY_FINDING:
6fbe195d 1987 case DISCOVERY_RESOLVING:
30dc78e1
JH
1988 return true;
1989
6fbe195d
AG
1990 default:
1991 return false;
1992 }
30dc78e1
JH
1993}
1994
ff9ef578
JH
1995void hci_discovery_set_state(struct hci_dev *hdev, int state)
1996{
bb3e0a33
JH
1997 int old_state = hdev->discovery.state;
1998
ff9ef578
JH
1999 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2000
bb3e0a33 2001 if (old_state == state)
ff9ef578
JH
2002 return;
2003
bb3e0a33
JH
2004 hdev->discovery.state = state;
2005
ff9ef578
JH
2006 switch (state) {
2007 case DISCOVERY_STOPPED:
c54c3860
AG
2008 hci_update_background_scan(hdev);
2009
bb3e0a33 2010 if (old_state != DISCOVERY_STARTING)
7b99b659 2011 mgmt_discovering(hdev, 0);
ff9ef578
JH
2012 break;
2013 case DISCOVERY_STARTING:
2014 break;
343f935b 2015 case DISCOVERY_FINDING:
ff9ef578
JH
2016 mgmt_discovering(hdev, 1);
2017 break;
30dc78e1
JH
2018 case DISCOVERY_RESOLVING:
2019 break;
ff9ef578
JH
2020 case DISCOVERY_STOPPING:
2021 break;
2022 }
ff9ef578
JH
2023}
2024
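/* An illustrative sketch (example_discovery_cycle() is hypothetical): the
 * state transitions a typical mgmt-driven discovery session walks through,
 * all performed under hdev->lock.
 */
static void example_discovery_cycle(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	/* ... inquiry/LE scan commands are issued; once they succeed: */
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	/* ... results arrive; when the session is being stopped: */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}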
1f9b9a5d 2025void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2026{
30883512 2027 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2028 struct inquiry_entry *p, *n;
1da177e4 2029
561aafbc
JH
2030 list_for_each_entry_safe(p, n, &cache->all, all) {
2031 list_del(&p->all);
b57c1a56 2032 kfree(p);
1da177e4 2033 }
561aafbc
JH
2034
2035 INIT_LIST_HEAD(&cache->unknown);
2036 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2037}
2038
a8c5fb1a
GP
2039struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2040 bdaddr_t *bdaddr)
1da177e4 2041{
30883512 2042 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2043 struct inquiry_entry *e;
2044
6ed93dc6 2045 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2046
561aafbc
JH
2047 list_for_each_entry(e, &cache->all, all) {
2048 if (!bacmp(&e->data.bdaddr, bdaddr))
2049 return e;
2050 }
2051
2052 return NULL;
2053}
2054
2055struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2056 bdaddr_t *bdaddr)
561aafbc 2057{
30883512 2058 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2059 struct inquiry_entry *e;
2060
6ed93dc6 2061 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2062
2063 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2064 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2065 return e;
2066 }
2067
2068 return NULL;
1da177e4
LT
2069}
2070
30dc78e1 2071struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2072 bdaddr_t *bdaddr,
2073 int state)
30dc78e1
JH
2074{
2075 struct discovery_state *cache = &hdev->discovery;
2076 struct inquiry_entry *e;
2077
6ed93dc6 2078 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2079
2080 list_for_each_entry(e, &cache->resolve, list) {
2081 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2082 return e;
2083 if (!bacmp(&e->data.bdaddr, bdaddr))
2084 return e;
2085 }
2086
2087 return NULL;
2088}
2089
a3d4e20a 2090void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2091 struct inquiry_entry *ie)
a3d4e20a
JH
2092{
2093 struct discovery_state *cache = &hdev->discovery;
2094 struct list_head *pos = &cache->resolve;
2095 struct inquiry_entry *p;
2096
2097 list_del(&ie->list);
2098
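	/* Note: the resolve list is kept sorted by ascending |RSSI|,
	 * i.e. strongest signal first, so that name resolution is
	 * attempted for the closest devices before weaker ones.
	 */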
2099 list_for_each_entry(p, &cache->resolve, list) {
2100 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2101 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2102 break;
2103 pos = &p->list;
2104 }
2105
2106 list_add(&ie->list, pos);
2107}
2108
af58925c
MH
2109u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2110 bool name_known)
1da177e4 2111{
30883512 2112 struct discovery_state *cache = &hdev->discovery;
70f23020 2113 struct inquiry_entry *ie;
af58925c 2114 u32 flags = 0;
1da177e4 2115
6ed93dc6 2116 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2117
2b2fec4d
SJ
2118 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2119
af58925c
MH
2120 if (!data->ssp_mode)
2121 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2122
70f23020 2123 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2124 if (ie) {
af58925c
MH
2125 if (!ie->data.ssp_mode)
2126 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2127
a3d4e20a 2128 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2129 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2130 ie->data.rssi = data->rssi;
2131 hci_inquiry_cache_update_resolve(hdev, ie);
2132 }
2133
561aafbc 2134 goto update;
a3d4e20a 2135 }
561aafbc
JH
2136
2137 /* Entry not in the cache. Add a new one. */
27f70f3e 2138 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2139 if (!ie) {
2140 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2141 goto done;
2142 }
561aafbc
JH
2143
2144 list_add(&ie->all, &cache->all);
2145
2146 if (name_known) {
2147 ie->name_state = NAME_KNOWN;
2148 } else {
2149 ie->name_state = NAME_NOT_KNOWN;
2150 list_add(&ie->list, &cache->unknown);
2151 }
70f23020 2152
561aafbc
JH
2153update:
2154 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2155 ie->name_state != NAME_PENDING) {
561aafbc
JH
2156 ie->name_state = NAME_KNOWN;
2157 list_del(&ie->list);
1da177e4
LT
2158 }
2159
70f23020
AE
2160 memcpy(&ie->data, data, sizeof(*data));
2161 ie->timestamp = jiffies;
1da177e4 2162 cache->timestamp = jiffies;
3175405b
JH
2163
2164 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2165 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2166
af58925c
MH
2167done:
2168 return flags;
1da177e4
LT
2169}
2170
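/* A minimal sketch (example_inquiry_result() is a hypothetical caller):
 * consuming the flags returned by hci_inquiry_cache_update() in an
 * inquiry-result path; like the real event handlers, this must run
 * under hdev->lock.
 */
static void example_inquiry_result(struct hci_dev *hdev,
				   struct inquiry_data *data)
{
	u32 flags = hci_inquiry_cache_update(hdev, data, false);

	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		BT_DBG("userspace should confirm the name lookup");

	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
		BT_DBG("remote lacks SSP, legacy PIN pairing applies");
}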
2171static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2172{
30883512 2173 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2174 struct inquiry_info *info = (struct inquiry_info *) buf;
2175 struct inquiry_entry *e;
2176 int copied = 0;
2177
561aafbc 2178 list_for_each_entry(e, &cache->all, all) {
1da177e4 2179 struct inquiry_data *data = &e->data;
b57c1a56
JH
2180
2181 if (copied >= num)
2182 break;
2183
1da177e4
LT
2184 bacpy(&info->bdaddr, &data->bdaddr);
2185 info->pscan_rep_mode = data->pscan_rep_mode;
2186 info->pscan_period_mode = data->pscan_period_mode;
2187 info->pscan_mode = data->pscan_mode;
2188 memcpy(info->dev_class, data->dev_class, 3);
2189 info->clock_offset = data->clock_offset;
b57c1a56 2190
1da177e4 2191 info++;
b57c1a56 2192 copied++;
1da177e4
LT
2193 }
2194
2195 BT_DBG("cache %p, copied %d", cache, copied);
2196 return copied;
2197}
2198
42c6b129 2199static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2200{
2201 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2202 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2203 struct hci_cp_inquiry cp;
2204
2205 BT_DBG("%s", hdev->name);
2206
2207 if (test_bit(HCI_INQUIRY, &hdev->flags))
2208 return;
2209
2210 /* Start Inquiry */
2211 memcpy(&cp.lap, &ir->lap, 3);
2212 cp.length = ir->length;
2213 cp.num_rsp = ir->num_rsp;
42c6b129 2214 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2215}
2216
2217int hci_inquiry(void __user *arg)
2218{
2219 __u8 __user *ptr = arg;
2220 struct hci_inquiry_req ir;
2221 struct hci_dev *hdev;
2222 int err = 0, do_inquiry = 0, max_rsp;
2223 long timeo;
2224 __u8 *buf;
2225
2226 if (copy_from_user(&ir, ptr, sizeof(ir)))
2227 return -EFAULT;
2228
5a08ecce
AE
2229 hdev = hci_dev_get(ir.dev_id);
2230 if (!hdev)
1da177e4
LT
2231 return -ENODEV;
2232
0736cfa8
MH
2233 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2234 err = -EBUSY;
2235 goto done;
2236 }
2237
4a964404 2238 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2239 err = -EOPNOTSUPP;
2240 goto done;
2241 }
2242
5b69bef5
MH
2243 if (hdev->dev_type != HCI_BREDR) {
2244 err = -EOPNOTSUPP;
2245 goto done;
2246 }
2247
56f87901
JH
2248 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2249 err = -EOPNOTSUPP;
2250 goto done;
2251 }
2252
09fd0de5 2253 hci_dev_lock(hdev);
8e87d142 2254 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2255 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2256 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2257 do_inquiry = 1;
2258 }
09fd0de5 2259 hci_dev_unlock(hdev);
1da177e4 2260
04837f64 2261 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2262
2263 if (do_inquiry) {
01178cd4
JH
2264 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2265 timeo);
70f23020
AE
2266 if (err < 0)
2267 goto done;
3e13fa1e
AG
2268
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2271 */
74316201 2272 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2273 TASK_INTERRUPTIBLE))
2274 return -EINTR;
70f23020 2275 }
1da177e4 2276
8fc9ced3
GP
2277 /* For an unlimited number of responses we will use a buffer with
2278 * 255 entries
2279 */
1da177e4
LT
2280 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2281
2282 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2283 * copy it to user space.
2284 */
01df8c31 2285 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2286 if (!buf) {
1da177e4
LT
2287 err = -ENOMEM;
2288 goto done;
2289 }
2290
09fd0de5 2291 hci_dev_lock(hdev);
1da177e4 2292 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2293 hci_dev_unlock(hdev);
1da177e4
LT
2294
2295 BT_DBG("num_rsp %d", ir.num_rsp);
2296
2297 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2298 ptr += sizeof(ir);
2299 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2300 ir.num_rsp))
1da177e4 2301 err = -EFAULT;
8e87d142 2302 } else
1da177e4
LT
2303 err = -EFAULT;
2304
2305 kfree(buf);
2306
2307done:
2308 hci_dev_put(hdev);
2309 return err;
2310}
2311
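/* A userspace-side sketch (illustrative only; example_user_inquiry() is
 * hypothetical and error handling plus the usual socket/ioctl includes
 * are omitted): exercising this handler through the legacy HCIINQUIRY
 * ioctl on a raw HCI socket.
 */
static void example_user_inquiry(void)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf;
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	memset(&buf, 0, sizeof(buf));
	buf.ir.dev_id = 0;				/* hci0 */
	buf.ir.length = 8;				/* inquiry length */
	buf.ir.num_rsp = 0;				/* 0 = unlimited, capped at 255 */
	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3); /* GIAC */

	ioctl(sk, HCIINQUIRY, &buf);
}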
cbed0ca1 2312static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2313{
1da177e4
LT
2314 int ret = 0;
2315
1da177e4
LT
2316 BT_DBG("%s %p", hdev->name, hdev);
2317
2318 hci_req_lock(hdev);
2319
94324962
JH
2320 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2321 ret = -ENODEV;
2322 goto done;
2323 }
2324
d603b76b
MH
2325 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2329 */
2330 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2331 ret = -ERFKILL;
2332 goto done;
2333 }
2334
2335 /* Check for valid public address or a configured static
2336 * random address, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2338 * or not.
2339 *
c6beca0e
MH
2340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2342 * available.
2343 *
a5c8f270
MH
2344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2346 */
c6beca0e
MH
2347 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2349 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351 ret = -EADDRNOTAVAIL;
2352 goto done;
2353 }
611b30f7
MH
2354 }
2355
1da177e4
LT
2356 if (test_bit(HCI_UP, &hdev->flags)) {
2357 ret = -EALREADY;
2358 goto done;
2359 }
2360
1da177e4
LT
2361 if (hdev->open(hdev)) {
2362 ret = -EIO;
2363 goto done;
2364 }
2365
f41c70c4
MH
2366 atomic_set(&hdev->cmd_cnt, 1);
2367 set_bit(HCI_INIT, &hdev->flags);
2368
af202f84
MH
2369 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2370 if (hdev->setup)
2371 ret = hdev->setup(hdev);
f41c70c4 2372
af202f84
MH
2373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2375 *
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2378 */
eb1904f4
MH
2379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2381 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2382
0ebca7d6
MH
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2386 *
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2390 */
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2393 }
2394
9713c17b
MH
2395 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2399 * on procedure.
2400 */
2401 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2402 hdev->set_bdaddr)
24c457e2
MH
2403 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2404 else
2405 ret = -EADDRNOTAVAIL;
2406 }
2407
f41c70c4 2408 if (!ret) {
4a964404 2409 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2410 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2411 ret = __hci_init(hdev);
1da177e4
LT
2412 }
2413
f41c70c4
MH
2414 clear_bit(HCI_INIT, &hdev->flags);
2415
1da177e4
LT
2416 if (!ret) {
2417 hci_dev_hold(hdev);
d6bfd59c 2418 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2419 set_bit(HCI_UP, &hdev->flags);
2420 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2421 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2422 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2423 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2424 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2425 hdev->dev_type == HCI_BREDR) {
09fd0de5 2426 hci_dev_lock(hdev);
744cf19e 2427 mgmt_powered(hdev, 1);
09fd0de5 2428 hci_dev_unlock(hdev);
56e5cb86 2429 }
8e87d142 2430 } else {
1da177e4 2431 /* Init failed, cleanup */
3eff45ea 2432 flush_work(&hdev->tx_work);
c347b765 2433 flush_work(&hdev->cmd_work);
b78752cc 2434 flush_work(&hdev->rx_work);
1da177e4
LT
2435
2436 skb_queue_purge(&hdev->cmd_q);
2437 skb_queue_purge(&hdev->rx_q);
2438
2439 if (hdev->flush)
2440 hdev->flush(hdev);
2441
2442 if (hdev->sent_cmd) {
2443 kfree_skb(hdev->sent_cmd);
2444 hdev->sent_cmd = NULL;
2445 }
2446
2447 hdev->close(hdev);
fee746b0 2448 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2449 }
2450
2451done:
2452 hci_req_unlock(hdev);
1da177e4
LT
2453 return ret;
2454}
2455
cbed0ca1
JH
2456/* ---- HCI ioctl helpers ---- */
2457
2458int hci_dev_open(__u16 dev)
2459{
2460 struct hci_dev *hdev;
2461 int err;
2462
2463 hdev = hci_dev_get(dev);
2464 if (!hdev)
2465 return -ENODEV;
2466
4a964404 2467 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2468 * up as user channel. Trying to bring them up as normal devices
2469 * will result in a failure. Only user channel operation is
2470 * possible.
2471 *
2472 * When this function is called for a user channel, the flag
2473 * HCI_USER_CHANNEL will be set first before attempting to
2474 * open the device.
2475 */
4a964404 2476 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2478 err = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
e1d08f40
JH
2482 /* We need to ensure that no other power on/off work is pending
2483 * before proceeding to call hci_dev_do_open. This is
2484 * particularly important if the setup procedure has not yet
2485 * completed.
2486 */
2487 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2488 cancel_delayed_work(&hdev->power_off);
2489
a5c8f270
MH
2490 /* After this call it is guaranteed that the setup procedure
2491 * has finished. This means that error conditions like RFKILL
2492 * or no valid public or static random address apply.
2493 */
e1d08f40
JH
2494 flush_workqueue(hdev->req_workqueue);
2495
12aa4f0a 2496 /* For controllers not using the management interface and that
b6ae8457 2497 * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2498 * so that pairing works for them. Once the management interface
2499 * is in use this bit will be cleared again and userspace has
2500 * to explicitly enable it.
2501 */
2502 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2503 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2504 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2505
cbed0ca1
JH
2506 err = hci_dev_do_open(hdev);
2507
fee746b0 2508done:
cbed0ca1 2509 hci_dev_put(hdev);
cbed0ca1
JH
2510 return err;
2511}
2512
d7347f3c
JH
2513/* This function requires the caller holds hdev->lock */
2514static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2515{
2516 struct hci_conn_params *p;
2517
f161dd41
JH
2518 list_for_each_entry(p, &hdev->le_conn_params, list) {
2519 if (p->conn) {
2520 hci_conn_drop(p->conn);
f8aaf9b6 2521 hci_conn_put(p->conn);
f161dd41
JH
2522 p->conn = NULL;
2523 }
d7347f3c 2524 list_del_init(&p->action);
f161dd41 2525 }
d7347f3c
JH
2526
2527 BT_DBG("All LE pending actions cleared");
2528}
2529
1da177e4
LT
2530static int hci_dev_do_close(struct hci_dev *hdev)
2531{
2532 BT_DBG("%s %p", hdev->name, hdev);
2533
78c04c0b
VCG
2534 cancel_delayed_work(&hdev->power_off);
2535
1da177e4
LT
2536 hci_req_cancel(hdev, ENODEV);
2537 hci_req_lock(hdev);
2538
2539 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2540 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2541 hci_req_unlock(hdev);
2542 return 0;
2543 }
2544
3eff45ea
GP
2545 /* Flush RX and TX work items */
2546 flush_work(&hdev->tx_work);
b78752cc 2547 flush_work(&hdev->rx_work);
1da177e4 2548
16ab91ab 2549 if (hdev->discov_timeout > 0) {
e0f9309f 2550 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2551 hdev->discov_timeout = 0;
5e5282bb 2552 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2553 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2554 }
2555
a8b2d5c2 2556 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2557 cancel_delayed_work(&hdev->service_cache);
2558
7ba8b4be 2559 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2560
2561 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2562 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2563
09fd0de5 2564 hci_dev_lock(hdev);
1f9b9a5d 2565 hci_inquiry_cache_flush(hdev);
d7347f3c 2566 hci_pend_le_actions_clear(hdev);
f161dd41 2567 hci_conn_hash_flush(hdev);
09fd0de5 2568 hci_dev_unlock(hdev);
1da177e4
LT
2569
2570 hci_notify(hdev, HCI_DEV_DOWN);
2571
2572 if (hdev->flush)
2573 hdev->flush(hdev);
2574
2575 /* Reset device */
2576 skb_queue_purge(&hdev->cmd_q);
2577 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2578 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2579 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2580 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2581 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2582 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2583 clear_bit(HCI_INIT, &hdev->flags);
2584 }
2585
c347b765
GP
2586 /* flush cmd work */
2587 flush_work(&hdev->cmd_work);
1da177e4
LT
2588
2589 /* Drop queues */
2590 skb_queue_purge(&hdev->rx_q);
2591 skb_queue_purge(&hdev->cmd_q);
2592 skb_queue_purge(&hdev->raw_q);
2593
2594 /* Drop last sent command */
2595 if (hdev->sent_cmd) {
65cc2b49 2596 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2597 kfree_skb(hdev->sent_cmd);
2598 hdev->sent_cmd = NULL;
2599 }
2600
b6ddb638
JH
2601 kfree_skb(hdev->recv_evt);
2602 hdev->recv_evt = NULL;
2603
1da177e4
LT
2604 /* After this point our queues are empty
2605 * and no tasks are scheduled. */
2606 hdev->close(hdev);
2607
35b973c9 2608 /* Clear flags */
fee746b0 2609 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2610 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2611
93c311a0
MH
2612 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2613 if (hdev->dev_type == HCI_BREDR) {
2614 hci_dev_lock(hdev);
2615 mgmt_powered(hdev, 0);
2616 hci_dev_unlock(hdev);
2617 }
8ee56540 2618 }
5add6af8 2619
ced5c338 2620 /* Controller radio is available but is currently powered down */
536619e8 2621 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2622
e59fda8d 2623 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2624 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2625 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2626
1da177e4
LT
2627 hci_req_unlock(hdev);
2628
2629 hci_dev_put(hdev);
2630 return 0;
2631}
2632
2633int hci_dev_close(__u16 dev)
2634{
2635 struct hci_dev *hdev;
2636 int err;
2637
70f23020
AE
2638 hdev = hci_dev_get(dev);
2639 if (!hdev)
1da177e4 2640 return -ENODEV;
8ee56540 2641
0736cfa8
MH
2642 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2643 err = -EBUSY;
2644 goto done;
2645 }
2646
8ee56540
MH
2647 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2648 cancel_delayed_work(&hdev->power_off);
2649
1da177e4 2650 err = hci_dev_do_close(hdev);
8ee56540 2651
0736cfa8 2652done:
1da177e4
LT
2653 hci_dev_put(hdev);
2654 return err;
2655}
2656
2657int hci_dev_reset(__u16 dev)
2658{
2659 struct hci_dev *hdev;
2660 int ret = 0;
2661
70f23020
AE
2662 hdev = hci_dev_get(dev);
2663 if (!hdev)
1da177e4
LT
2664 return -ENODEV;
2665
2666 hci_req_lock(hdev);
1da177e4 2667
808a049e
MH
2668 if (!test_bit(HCI_UP, &hdev->flags)) {
2669 ret = -ENETDOWN;
1da177e4 2670 goto done;
808a049e 2671 }
1da177e4 2672
0736cfa8
MH
2673 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2674 ret = -EBUSY;
2675 goto done;
2676 }
2677
4a964404 2678 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2679 ret = -EOPNOTSUPP;
2680 goto done;
2681 }
2682
1da177e4
LT
2683 /* Drop queues */
2684 skb_queue_purge(&hdev->rx_q);
2685 skb_queue_purge(&hdev->cmd_q);
2686
09fd0de5 2687 hci_dev_lock(hdev);
1f9b9a5d 2688 hci_inquiry_cache_flush(hdev);
1da177e4 2689 hci_conn_hash_flush(hdev);
09fd0de5 2690 hci_dev_unlock(hdev);
1da177e4
LT
2691
2692 if (hdev->flush)
2693 hdev->flush(hdev);
2694
8e87d142 2695 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2696 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2697
fee746b0 2698 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2699
2700done:
1da177e4
LT
2701 hci_req_unlock(hdev);
2702 hci_dev_put(hdev);
2703 return ret;
2704}
2705
2706int hci_dev_reset_stat(__u16 dev)
2707{
2708 struct hci_dev *hdev;
2709 int ret = 0;
2710
70f23020
AE
2711 hdev = hci_dev_get(dev);
2712 if (!hdev)
1da177e4
LT
2713 return -ENODEV;
2714
0736cfa8
MH
2715 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2716 ret = -EBUSY;
2717 goto done;
2718 }
2719
4a964404 2720 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2721 ret = -EOPNOTSUPP;
2722 goto done;
2723 }
2724
1da177e4
LT
2725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2726
0736cfa8 2727done:
1da177e4 2728 hci_dev_put(hdev);
1da177e4
LT
2729 return ret;
2730}
2731
123abc08
JH
2732static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2733{
bc6d2d04 2734 bool conn_changed, discov_changed;
123abc08
JH
2735
2736 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2737
2738 if ((scan & SCAN_PAGE))
2739 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2740 &hdev->dev_flags);
2741 else
2742 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2743 &hdev->dev_flags);
2744
bc6d2d04
JH
2745 if ((scan & SCAN_INQUIRY)) {
2746 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2747 &hdev->dev_flags);
2748 } else {
2749 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2750 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2751 &hdev->dev_flags);
2752 }
2753
123abc08
JH
2754 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755 return;
2756
bc6d2d04
JH
2757 if (conn_changed || discov_changed) {
2758 /* In case this was disabled through mgmt */
2759 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2760
2761 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2762 mgmt_update_adv_data(hdev);
2763
123abc08 2764 mgmt_new_settings(hdev);
bc6d2d04 2765 }
123abc08
JH
2766}
2767
1da177e4
LT
2768int hci_dev_cmd(unsigned int cmd, void __user *arg)
2769{
2770 struct hci_dev *hdev;
2771 struct hci_dev_req dr;
2772 int err = 0;
2773
2774 if (copy_from_user(&dr, arg, sizeof(dr)))
2775 return -EFAULT;
2776
70f23020
AE
2777 hdev = hci_dev_get(dr.dev_id);
2778 if (!hdev)
1da177e4
LT
2779 return -ENODEV;
2780
0736cfa8
MH
2781 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2782 err = -EBUSY;
2783 goto done;
2784 }
2785
4a964404 2786 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2787 err = -EOPNOTSUPP;
2788 goto done;
2789 }
2790
5b69bef5
MH
2791 if (hdev->dev_type != HCI_BREDR) {
2792 err = -EOPNOTSUPP;
2793 goto done;
2794 }
2795
56f87901
JH
2796 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2797 err = -EOPNOTSUPP;
2798 goto done;
2799 }
2800
1da177e4
LT
2801 switch (cmd) {
2802 case HCISETAUTH:
01178cd4
JH
2803 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2804 HCI_INIT_TIMEOUT);
1da177e4
LT
2805 break;
2806
2807 case HCISETENCRYPT:
2808 if (!lmp_encrypt_capable(hdev)) {
2809 err = -EOPNOTSUPP;
2810 break;
2811 }
2812
2813 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2814 /* Auth must be enabled first */
01178cd4
JH
2815 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2816 HCI_INIT_TIMEOUT);
1da177e4
LT
2817 if (err)
2818 break;
2819 }
2820
01178cd4
JH
2821 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2822 HCI_INIT_TIMEOUT);
1da177e4
LT
2823 break;
2824
2825 case HCISETSCAN:
01178cd4
JH
2826 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2827 HCI_INIT_TIMEOUT);
91a668b0 2828
bc6d2d04
JH
2829 /* Ensure that the connectable and discoverable states
2830 * get correctly modified as this was a non-mgmt change.
91a668b0 2831 */
123abc08
JH
2832 if (!err)
2833 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2834 break;
2835
1da177e4 2836 case HCISETLINKPOL:
01178cd4
JH
2837 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2838 HCI_INIT_TIMEOUT);
1da177e4
LT
2839 break;
2840
2841 case HCISETLINKMODE:
e4e8e37c
MH
2842 hdev->link_mode = ((__u16) dr.dev_opt) &
2843 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2844 break;
2845
2846 case HCISETPTYPE:
2847 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2848 break;
2849
2850 case HCISETACLMTU:
e4e8e37c
MH
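 /* Note (inferred from the pointer arithmetic below): dev_opt packs
 * two __u16 values, the MTU in the upper half and the packet count
 * in the lower half (on a little-endian host).
 */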
2851 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2852 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2853 break;
2854
2855 case HCISETSCOMTU:
e4e8e37c
MH
2856 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2857 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2858 break;
2859
2860 default:
2861 err = -EINVAL;
2862 break;
2863 }
e4e8e37c 2864
0736cfa8 2865done:
1da177e4
LT
2866 hci_dev_put(hdev);
2867 return err;
2868}
2869
2870int hci_get_dev_list(void __user *arg)
2871{
8035ded4 2872 struct hci_dev *hdev;
1da177e4
LT
2873 struct hci_dev_list_req *dl;
2874 struct hci_dev_req *dr;
1da177e4
LT
2875 int n = 0, size, err;
2876 __u16 dev_num;
2877
2878 if (get_user(dev_num, (__u16 __user *) arg))
2879 return -EFAULT;
2880
2881 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2882 return -EINVAL;
2883
2884 size = sizeof(*dl) + dev_num * sizeof(*dr);
2885
70f23020
AE
2886 dl = kzalloc(size, GFP_KERNEL);
2887 if (!dl)
1da177e4
LT
2888 return -ENOMEM;
2889
2890 dr = dl->dev_req;
2891
f20d09d5 2892 read_lock(&hci_dev_list_lock);
8035ded4 2893 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2894 unsigned long flags = hdev->flags;
c542a06c 2895
2e84d8db
MH
2896 /* When the auto-off is configured it means the transport
2897 * is running, but in that case still indicate that the
2898 * device is actually down.
2899 */
2900 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2901 flags &= ~BIT(HCI_UP);
c542a06c 2902
1da177e4 2903 (dr + n)->dev_id = hdev->id;
2e84d8db 2904 (dr + n)->dev_opt = flags;
c542a06c 2905
1da177e4
LT
2906 if (++n >= dev_num)
2907 break;
2908 }
f20d09d5 2909 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2910
2911 dl->dev_num = n;
2912 size = sizeof(*dl) + n * sizeof(*dr);
2913
2914 err = copy_to_user(arg, dl, size);
2915 kfree(dl);
2916
2917 return err ? -EFAULT : 0;
2918}
2919
2920int hci_get_dev_info(void __user *arg)
2921{
2922 struct hci_dev *hdev;
2923 struct hci_dev_info di;
2e84d8db 2924 unsigned long flags;
1da177e4
LT
2925 int err = 0;
2926
2927 if (copy_from_user(&di, arg, sizeof(di)))
2928 return -EFAULT;
2929
70f23020
AE
2930 hdev = hci_dev_get(di.dev_id);
2931 if (!hdev)
1da177e4
LT
2932 return -ENODEV;
2933
2e84d8db
MH
2934 /* When the auto-off is configured it means the transport
2935 * is running, but in that case still indicate that the
2936 * device is actually down.
2937 */
2938 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2939 flags = hdev->flags & ~BIT(HCI_UP);
2940 else
2941 flags = hdev->flags;
c542a06c 2942
1da177e4
LT
2943 strcpy(di.name, hdev->name);
2944 di.bdaddr = hdev->bdaddr;
60f2a3ed 2945 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2946 di.flags = flags;
1da177e4 2947 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2948 if (lmp_bredr_capable(hdev)) {
2949 di.acl_mtu = hdev->acl_mtu;
2950 di.acl_pkts = hdev->acl_pkts;
2951 di.sco_mtu = hdev->sco_mtu;
2952 di.sco_pkts = hdev->sco_pkts;
2953 } else {
2954 di.acl_mtu = hdev->le_mtu;
2955 di.acl_pkts = hdev->le_pkts;
2956 di.sco_mtu = 0;
2957 di.sco_pkts = 0;
2958 }
1da177e4
LT
2959 di.link_policy = hdev->link_policy;
2960 di.link_mode = hdev->link_mode;
2961
2962 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2963 memcpy(&di.features, &hdev->features, sizeof(di.features));
2964
2965 if (copy_to_user(arg, &di, sizeof(di)))
2966 err = -EFAULT;
2967
2968 hci_dev_put(hdev);
2969
2970 return err;
2971}
2972
2973/* ---- Interface to HCI drivers ---- */
2974
611b30f7
MH
2975static int hci_rfkill_set_block(void *data, bool blocked)
2976{
2977 struct hci_dev *hdev = data;
2978
2979 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2980
0736cfa8
MH
2981 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2982 return -EBUSY;
2983
5e130367
JH
2984 if (blocked) {
2985 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2986 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2987 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2988 hci_dev_do_close(hdev);
5e130367
JH
2989 } else {
2990 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2991 }
611b30f7
MH
2992
2993 return 0;
2994}
2995
2996static const struct rfkill_ops hci_rfkill_ops = {
2997 .set_block = hci_rfkill_set_block,
2998};
2999
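/* A sketch of how this ops table gets wired up; it mirrors the
 * rfkill_alloc()/rfkill_register() sequence that hci_register_dev()
 * performs later in this file (example_setup_rfkill() itself is
 * hypothetical).
 */
static void example_setup_rfkill(struct hci_dev *hdev)
{
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH,
				    &hci_rfkill_ops, hdev);
	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
		rfkill_destroy(hdev->rfkill);
		hdev->rfkill = NULL;
	}
}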
ab81cbf9
JH
3000static void hci_power_on(struct work_struct *work)
3001{
3002 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3003 int err;
ab81cbf9
JH
3004
3005 BT_DBG("%s", hdev->name);
3006
cbed0ca1 3007 err = hci_dev_do_open(hdev);
96570ffc
JH
3008 if (err < 0) {
3009 mgmt_set_powered_failed(hdev, err);
ab81cbf9 3010 return;
96570ffc 3011 }
ab81cbf9 3012
a5c8f270
MH
3013 /* During the HCI setup phase, a few error conditions are
3014 * ignored and they need to be checked now. If they are still
3015 * valid, it is important to turn the device back off.
3016 */
3017 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3018 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3019 (hdev->dev_type == HCI_BREDR &&
3020 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3021 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3022 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3023 hci_dev_do_close(hdev);
3024 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3025 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3026 HCI_AUTO_OFF_TIMEOUT);
bf543036 3027 }
ab81cbf9 3028
fee746b0 3029 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3030 /* For unconfigured devices, set the HCI_RAW flag
3031 * so that userspace can easily identify them.
4a964404
MH
3032 */
3033 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3034 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3035
3036 /* For fully configured devices, this will send
3037 * the Index Added event. For unconfigured devices,
3038 * it will send the Unconfigured Index Added event.
3039 *
3040 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3041 * and no event will be sent.
3042 */
3043 mgmt_index_added(hdev);
d603b76b 3044 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3045 /* When the controller is now configured, then it
3046 * is important to clear the HCI_RAW flag.
3047 */
3048 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3049 clear_bit(HCI_RAW, &hdev->flags);
3050
d603b76b
MH
3051 /* Powering on the controller with HCI_CONFIG set only
3052 * happens with the transition from unconfigured to
3053 * configured. This will send the Index Added event.
3054 */
744cf19e 3055 mgmt_index_added(hdev);
fee746b0 3056 }
ab81cbf9
JH
3057}
3058
3059static void hci_power_off(struct work_struct *work)
3060{
3243553f 3061 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3062 power_off.work);
ab81cbf9
JH
3063
3064 BT_DBG("%s", hdev->name);
3065
8ee56540 3066 hci_dev_do_close(hdev);
ab81cbf9
JH
3067}
3068
16ab91ab
JH
3069static void hci_discov_off(struct work_struct *work)
3070{
3071 struct hci_dev *hdev;
16ab91ab
JH
3072
3073 hdev = container_of(work, struct hci_dev, discov_off.work);
3074
3075 BT_DBG("%s", hdev->name);
3076
d1967ff8 3077 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3078}
3079
35f7498a 3080void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3081{
4821002c 3082 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3083
4821002c
JH
3084 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3085 list_del(&uuid->list);
2aeb9a1a
JH
3086 kfree(uuid);
3087 }
2aeb9a1a
JH
3088}
3089
35f7498a 3090void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3091{
3092 struct list_head *p, *n;
3093
3094 list_for_each_safe(p, n, &hdev->link_keys) {
3095 struct link_key *key;
3096
3097 key = list_entry(p, struct link_key, list);
3098
3099 list_del(p);
3100 kfree(key);
3101 }
55ed8ca1
JH
3102}
3103
35f7498a 3104void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 3105{
970d0f1b 3106 struct smp_ltk *k;
b899efaf 3107
970d0f1b
JH
3108 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3109 list_del_rcu(&k->list);
3110 kfree_rcu(k, rcu);
b899efaf 3111 }
b899efaf
VCG
3112}
3113
970c4e46
JH
3114void hci_smp_irks_clear(struct hci_dev *hdev)
3115{
adae20cb 3116 struct smp_irk *k;
970c4e46 3117
adae20cb
JH
3118 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3119 list_del_rcu(&k->list);
3120 kfree_rcu(k, rcu);
970c4e46
JH
3121 }
3122}
3123
55ed8ca1
JH
3124struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3125{
8035ded4 3126 struct link_key *k;
55ed8ca1 3127
8035ded4 3128 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3129 if (bacmp(bdaddr, &k->bdaddr) == 0)
3130 return k;
55ed8ca1
JH
3131
3132 return NULL;
3133}
3134
745c0ce3 3135static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3136 u8 key_type, u8 old_key_type)
d25e28ab
JH
3137{
3138 /* Legacy key */
3139 if (key_type < 0x03)
745c0ce3 3140 return true;
d25e28ab
JH
3141
3142 /* Debug keys are insecure so don't store them persistently */
3143 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3144 return false;
d25e28ab
JH
3145
3146 /* Changed combination key and there's no previous one */
3147 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3148 return false;
d25e28ab
JH
3149
3150 /* Security mode 3 case */
3151 if (!conn)
745c0ce3 3152 return true;
d25e28ab
JH
3153
3154 /* Neither the local nor the remote side requested no-bonding */
3155 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3156 return true;
d25e28ab
JH
3157
3158 /* Local side had dedicated bonding as requirement */
3159 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3160 return true;
d25e28ab
JH
3161
3162 /* Remote side had dedicated bonding as requirement */
3163 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3164 return true;
d25e28ab
JH
3165
3166 /* If none of the above criteria match, then don't store the key
3167 * persistently */
745c0ce3 3168 return false;
d25e28ab
JH
3169}
3170
e804d25d 3171static u8 ltk_role(u8 type)
98a0b845 3172{
e804d25d
JH
3173 if (type == SMP_LTK)
3174 return HCI_ROLE_MASTER;
98a0b845 3175
e804d25d 3176 return HCI_ROLE_SLAVE;
98a0b845
JH
3177}
3178
fe39c7b2 3179struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
e804d25d 3180 u8 role)
75d262c2 3181{
c9839a11 3182 struct smp_ltk *k;
75d262c2 3183
970d0f1b
JH
3184 rcu_read_lock();
3185 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
fe39c7b2 3186 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3187 continue;
3188
e804d25d 3189 if (ltk_role(k->type) != role)
98a0b845
JH
3190 continue;
3191
970d0f1b 3192 rcu_read_unlock();
c9839a11 3193 return k;
75d262c2 3194 }
970d0f1b 3195 rcu_read_unlock();
75d262c2
VCG
3196
3197 return NULL;
3198}
75d262c2 3199
c9839a11 3200struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3201 u8 addr_type, u8 role)
75d262c2 3202{
c9839a11 3203 struct smp_ltk *k;
75d262c2 3204
970d0f1b
JH
3205 rcu_read_lock();
3206 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
c9839a11 3207 if (addr_type == k->bdaddr_type &&
98a0b845 3208 bacmp(bdaddr, &k->bdaddr) == 0 &&
970d0f1b
JH
3209 ltk_role(k->type) == role) {
3210 rcu_read_unlock();
75d262c2 3211 return k;
970d0f1b
JH
3212 }
3213 }
3214 rcu_read_unlock();
75d262c2
VCG
3215
3216 return NULL;
3217}
75d262c2 3218
970c4e46
JH
3219struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3220{
3221 struct smp_irk *irk;
3222
adae20cb
JH
3223 rcu_read_lock();
3224 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3225 if (!bacmp(&irk->rpa, rpa)) {
3226 rcu_read_unlock();
970c4e46 3227 return irk;
adae20cb 3228 }
970c4e46
JH
3229 }
3230
adae20cb 3231 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 3232 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 3233 bacpy(&irk->rpa, rpa);
adae20cb 3234 rcu_read_unlock();
970c4e46
JH
3235 return irk;
3236 }
3237 }
adae20cb 3238 rcu_read_unlock();
970c4e46
JH
3239
3240 return NULL;
3241}
3242
3243struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3244 u8 addr_type)
3245{
3246 struct smp_irk *irk;
3247
6cfc9988
JH
3248 /* Identity Address must be public or static random */
3249 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3250 return NULL;
3251
adae20cb
JH
3252 rcu_read_lock();
3253 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 3254 if (addr_type == irk->addr_type &&
adae20cb
JH
3255 bacmp(bdaddr, &irk->bdaddr) == 0) {
3256 rcu_read_unlock();
970c4e46 3257 return irk;
adae20cb 3258 }
970c4e46 3259 }
adae20cb 3260 rcu_read_unlock();
970c4e46
JH
3261
3262 return NULL;
3263}
3264
567fa2aa 3265struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3266 bdaddr_t *bdaddr, u8 *val, u8 type,
3267 u8 pin_len, bool *persistent)
55ed8ca1
JH
3268{
3269 struct link_key *key, *old_key;
745c0ce3 3270 u8 old_key_type;
55ed8ca1
JH
3271
3272 old_key = hci_find_link_key(hdev, bdaddr);
3273 if (old_key) {
3274 old_key_type = old_key->type;
3275 key = old_key;
3276 } else {
12adcf3a 3277 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3278 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3279 if (!key)
567fa2aa 3280 return NULL;
55ed8ca1
JH
3281 list_add(&key->list, &hdev->link_keys);
3282 }
3283
6ed93dc6 3284 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3285
d25e28ab
JH
3286 /* Some buggy controller combinations generate a changed
3287 * combination key for legacy pairing even when there's no
3288 * previous key */
3289 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3290 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3291 type = HCI_LK_COMBINATION;
655fe6ec
JH
3292 if (conn)
3293 conn->key_type = type;
3294 }
d25e28ab 3295
55ed8ca1 3296 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3297 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3298 key->pin_len = pin_len;
3299
b6020ba0 3300 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3301 key->type = old_key_type;
4748fed2
JH
3302 else
3303 key->type = type;
3304
7652ff6a
JH
3305 if (persistent)
3306 *persistent = hci_persistent_key(hdev, conn, type,
3307 old_key_type);
4df378a1 3308
567fa2aa 3309 return key;
55ed8ca1
JH
3310}
3311
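/* A minimal sketch (example_store_link_key() is a hypothetical caller,
 * invoked under hdev->lock): using the persistent flag filled in by
 * hci_add_link_key() to decide whether the key should outlive the
 * connection.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, u8 *val,
				   u8 type, u8 pin_len)
{
	struct link_key *key;
	bool persistent;

	key = hci_add_link_key(hdev, conn, &conn->dst, val, type,
			       pin_len, &persistent);
	if (key && persistent)
		BT_DBG("key for %pMR should be stored by userspace",
		       &conn->dst);
}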
ca9142b8 3312struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3313 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3314 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3315{
c9839a11 3316 struct smp_ltk *key, *old_key;
e804d25d 3317 u8 role = ltk_role(type);
75d262c2 3318
e804d25d 3319 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3320 if (old_key)
75d262c2 3321 key = old_key;
c9839a11 3322 else {
0a14ab41 3323 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3324 if (!key)
ca9142b8 3325 return NULL;
970d0f1b 3326 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3327 }
3328
75d262c2 3329 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3330 key->bdaddr_type = addr_type;
3331 memcpy(key->val, tk, sizeof(key->val));
3332 key->authenticated = authenticated;
3333 key->ediv = ediv;
fe39c7b2 3334 key->rand = rand;
c9839a11
VCG
3335 key->enc_size = enc_size;
3336 key->type = type;
75d262c2 3337
ca9142b8 3338 return key;
75d262c2
VCG
3339}
3340
ca9142b8
JH
3341struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3342 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3343{
3344 struct smp_irk *irk;
3345
3346 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3347 if (!irk) {
3348 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3349 if (!irk)
ca9142b8 3350 return NULL;
970c4e46
JH
3351
3352 bacpy(&irk->bdaddr, bdaddr);
3353 irk->addr_type = addr_type;
3354
adae20cb 3355 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
3356 }
3357
3358 memcpy(irk->val, val, 16);
3359 bacpy(&irk->rpa, rpa);
3360
ca9142b8 3361 return irk;
970c4e46
JH
3362}
3363
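/* An illustrative sketch of the RCU discipline this change applies to
 * the identity_resolving_keys list (example_irk_present() is
 * hypothetical): lockless lookups run inside an RCU read-side critical
 * section, while writers, serialized by hdev->lock, use
 * list_add_rcu()/list_del_rcu() and defer the free with kfree_rcu()
 * until all in-flight readers are done.
 */
static bool example_irk_present(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}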
55ed8ca1
JH
3364int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3365{
3366 struct link_key *key;
3367
3368 key = hci_find_link_key(hdev, bdaddr);
3369 if (!key)
3370 return -ENOENT;
3371
6ed93dc6 3372 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3373
3374 list_del(&key->list);
3375 kfree(key);
3376
3377 return 0;
3378}
3379
e0b2b27e 3380int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 3381{
970d0f1b 3382 struct smp_ltk *k;
c51ffa0b 3383 int removed = 0;
b899efaf 3384
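 /* Note: writers to this RCU-protected list are serialized by
 * hdev->lock, so entries may be unlinked with list_del_rcu()
 * during the traversal below while lockless readers continue
 * unharmed; kfree_rcu() delays the actual free past any such
 * readers.
 */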
970d0f1b 3385 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 3386 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3387 continue;
3388
6ed93dc6 3389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 3390
970d0f1b
JH
3391 list_del_rcu(&k->list);
3392 kfree_rcu(k, rcu);
c51ffa0b 3393 removed++;
b899efaf
VCG
3394 }
3395
c51ffa0b 3396 return removed ? 0 : -ENOENT;
b899efaf
VCG
3397}
3398
a7ec7338
JH
3399void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3400{
adae20cb 3401 struct smp_irk *k;
a7ec7338 3402
adae20cb 3403 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3404 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3405 continue;
3406
3407 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3408
adae20cb
JH
3409 list_del_rcu(&k->list);
3410 kfree_rcu(k, rcu);
a7ec7338
JH
3411 }
3412}
3413
6bd32326 3414/* HCI command timer function */
65cc2b49 3415static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3416{
65cc2b49
MH
3417 struct hci_dev *hdev = container_of(work, struct hci_dev,
3418 cmd_timer.work);
6bd32326 3419
bda4f23a
AE
3420 if (hdev->sent_cmd) {
3421 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3422 u16 opcode = __le16_to_cpu(sent->opcode);
3423
3424 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3425 } else {
3426 BT_ERR("%s command tx timeout", hdev->name);
3427 }
3428
6bd32326 3429 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3430 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3431}
3432
2763eda6 3433struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3434 bdaddr_t *bdaddr)
2763eda6
SJ
3435{
3436 struct oob_data *data;
3437
3438 list_for_each_entry(data, &hdev->remote_oob_data, list)
3439 if (bacmp(bdaddr, &data->bdaddr) == 0)
3440 return data;
3441
3442 return NULL;
3443}
3444
3445int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3446{
3447 struct oob_data *data;
3448
3449 data = hci_find_remote_oob_data(hdev, bdaddr);
3450 if (!data)
3451 return -ENOENT;
3452
6ed93dc6 3453 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3454
3455 list_del(&data->list);
3456 kfree(data);
3457
3458 return 0;
3459}
3460
35f7498a 3461void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3462{
3463 struct oob_data *data, *n;
3464
3465 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3466 list_del(&data->list);
3467 kfree(data);
3468 }
2763eda6
SJ
3469}
3470
0798872e
MH
3471int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3472 u8 *hash, u8 *randomizer)
2763eda6
SJ
3473{
3474 struct oob_data *data;
3475
3476 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3477 if (!data) {
0a14ab41 3478 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3479 if (!data)
3480 return -ENOMEM;
3481
3482 bacpy(&data->bdaddr, bdaddr);
3483 list_add(&data->list, &hdev->remote_oob_data);
3484 }
3485
519ca9d0
MH
3486 memcpy(data->hash192, hash, sizeof(data->hash192));
3487 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3488
0798872e
MH
3489 memset(data->hash256, 0, sizeof(data->hash256));
3490 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3491
3492 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3493
3494 return 0;
3495}
3496
3497int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3498 u8 *hash192, u8 *randomizer192,
3499 u8 *hash256, u8 *randomizer256)
3500{
3501 struct oob_data *data;
3502
3503 data = hci_find_remote_oob_data(hdev, bdaddr);
3504 if (!data) {
0a14ab41 3505 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3506 if (!data)
3507 return -ENOMEM;
3508
3509 bacpy(&data->bdaddr, bdaddr);
3510 list_add(&data->list, &hdev->remote_oob_data);
3511 }
3512
3513 memcpy(data->hash192, hash192, sizeof(data->hash192));
3514 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3515
3516 memcpy(data->hash256, hash256, sizeof(data->hash256));
3517 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3518
6ed93dc6 3519 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3520
3521 return 0;
3522}
3523
dcc36c16 3524struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3525 bdaddr_t *bdaddr, u8 type)
b2a66aad 3526{
8035ded4 3527 struct bdaddr_list *b;
b2a66aad 3528
dcc36c16 3529 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3530 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3531 return b;
b9ee0a78 3532 }
b2a66aad
AJ
3533
3534 return NULL;
3535}
3536
dcc36c16 3537void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3538{
3539 struct list_head *p, *n;
3540
dcc36c16 3541 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3542 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3543
3544 list_del(p);
3545 kfree(b);
3546 }
b2a66aad
AJ
3547}
3548
dcc36c16 3549int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3550{
3551 struct bdaddr_list *entry;
b2a66aad 3552
b9ee0a78 3553 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3554 return -EBADF;
3555
dcc36c16 3556 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3557 return -EEXIST;
b2a66aad 3558
27f70f3e 3559 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3560 if (!entry)
3561 return -ENOMEM;
b2a66aad
AJ
3562
3563 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3564 entry->bdaddr_type = type;
b2a66aad 3565
dcc36c16 3566 list_add(&entry->list, list);
b2a66aad 3567
2a8357f2 3568 return 0;
b2a66aad
AJ
3569}
3570
dcc36c16 3571int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3572{
3573 struct bdaddr_list *entry;
b2a66aad 3574
35f7498a 3575 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3576 hci_bdaddr_list_clear(list);
35f7498a
JH
3577 return 0;
3578 }
b2a66aad 3579
dcc36c16 3580 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3581 if (!entry)
3582 return -ENOENT;
3583
3584 list_del(&entry->list);
3585 kfree(entry);
3586
3587 return 0;
3588}
3589
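/* A minimal sketch (the address value is purely illustrative and
 * example_blacklist_cycle() is hypothetical): the generic bdaddr_list
 * helpers above as used on lists such as hdev->blacklist.
 */
static void example_blacklist_cycle(struct hci_dev *hdev)
{
	bdaddr_t addr = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 } };

	if (!hci_bdaddr_list_add(&hdev->blacklist, &addr, BDADDR_BREDR) &&
	    hci_bdaddr_list_lookup(&hdev->blacklist, &addr, BDADDR_BREDR))
		hci_bdaddr_list_del(&hdev->blacklist, &addr, BDADDR_BREDR);
}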
15819a70
AG
3590/* This function requires the caller holds hdev->lock */
3591struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3592 bdaddr_t *addr, u8 addr_type)
3593{
3594 struct hci_conn_params *params;
3595
738f6185
JH
3596 /* The conn params list only contains identity addresses */
3597 if (!hci_is_identity_address(addr, addr_type))
3598 return NULL;
3599
15819a70
AG
3600 list_for_each_entry(params, &hdev->le_conn_params, list) {
3601 if (bacmp(&params->addr, addr) == 0 &&
3602 params->addr_type == addr_type) {
3603 return params;
3604 }
3605 }
3606
3607 return NULL;
3608}
3609
cef952ce
AG
3610static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3611{
3612 struct hci_conn *conn;
3613
3614 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3615 if (!conn)
3616 return false;
3617
3618 if (conn->dst_type != type)
3619 return false;
3620
3621 if (conn->state != BT_CONNECTED)
3622 return false;
3623
3624 return true;
3625}
3626
4b10966f 3627/* This function requires the caller holds hdev->lock */
501f8827
JH
3628struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3629 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3630{
912b42ef 3631 struct hci_conn_params *param;
a9b0a04c 3632
738f6185
JH
3633 /* The list only contains identity addresses */
3634 if (!hci_is_identity_address(addr, addr_type))
3635 return NULL;
a9b0a04c 3636
501f8827 3637 list_for_each_entry(param, list, action) {
912b42ef
JH
3638 if (bacmp(&param->addr, addr) == 0 &&
3639 param->addr_type == addr_type)
3640 return param;
4b10966f
MH
3641 }
3642
3643 return NULL;
a9b0a04c
AG
3644}
3645
15819a70 3646/* This function requires the caller holds hdev->lock */
51d167c0
MH
3647struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3648 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3649{
3650 struct hci_conn_params *params;
3651
c46245b3 3652 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3653 return NULL;
a9b0a04c 3654
15819a70 3655 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3656 if (params)
51d167c0 3657 return params;
15819a70
AG
3658
3659 params = kzalloc(sizeof(*params), GFP_KERNEL);
3660 if (!params) {
3661 BT_ERR("Out of memory");
51d167c0 3662 return NULL;
15819a70
AG
3663 }
3664
3665 bacpy(&params->addr, addr);
3666 params->addr_type = addr_type;
cef952ce
AG
3667
3668 list_add(&params->list, &hdev->le_conn_params);
93450c75 3669 INIT_LIST_HEAD(&params->action);
cef952ce 3670
bf5b3c8b
MH
3671 params->conn_min_interval = hdev->le_conn_min_interval;
3672 params->conn_max_interval = hdev->le_conn_max_interval;
3673 params->conn_latency = hdev->le_conn_latency;
3674 params->supervision_timeout = hdev->le_supv_timeout;
3675 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3676
3677 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3678
51d167c0 3679 return params;
bf5b3c8b
MH
3680}
3681
3682/* This function requires the caller holds hdev->lock */
3683int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3684 u8 auto_connect)
15819a70
AG
3685{
3686 struct hci_conn_params *params;
3687
8c87aae1
MH
3688 params = hci_conn_params_add(hdev, addr, addr_type);
3689 if (!params)
3690 return -EIO;
cef952ce 3691
42ce26de
JH
3692 if (params->auto_connect == auto_connect)
3693 return 0;
3694
95305baa 3695 list_del_init(&params->action);
15819a70 3696
cef952ce
AG
3697 switch (auto_connect) {
3698 case HCI_AUTO_CONN_DISABLED:
3699 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3700 hci_update_background_scan(hdev);
cef952ce 3701 break;
851efca8 3702 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3703 list_add(&params->action, &hdev->pend_le_reports);
3704 hci_update_background_scan(hdev);
cef952ce 3705 break;
4b9e7e75 3706 case HCI_AUTO_CONN_DIRECT:
cef952ce 3707 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3708 if (!is_connected(hdev, addr, addr_type)) {
3709 list_add(&params->action, &hdev->pend_le_conns);
3710 hci_update_background_scan(hdev);
3711 }
cef952ce
AG
3712 break;
3713 }
15819a70 3714
851efca8
JH
3715 params->auto_connect = auto_connect;
3716
d06b50ce
MH
3717 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3718 auto_connect);
a9b0a04c
AG
3719
3720 return 0;
15819a70
AG
3721}
3722
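/* A minimal sketch (example_mark_auto_connect() is a hypothetical
 * caller): flagging a peer for automatic reconnection; as noted above,
 * hdev->lock must be held around the call.
 */
static int example_mark_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}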
f6c63249 3723static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3724{
f8aaf9b6 3725 if (params->conn) {
f161dd41 3726 hci_conn_drop(params->conn);
f8aaf9b6
JH
3727 hci_conn_put(params->conn);
3728 }
f161dd41 3729
95305baa 3730 list_del(&params->action);
15819a70
AG
3731 list_del(&params->list);
3732 kfree(params);
f6c63249
JH
3733}
3734
3735/* This function requires the caller holds hdev->lock */
3736void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3737{
3738 struct hci_conn_params *params;
3739
3740 params = hci_conn_params_lookup(hdev, addr, addr_type);
3741 if (!params)
3742 return;
3743
3744 hci_conn_params_free(params);
15819a70 3745
95305baa
JH
3746 hci_update_background_scan(hdev);
3747
15819a70
AG
3748 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3749}
3750
3751/* This function requires the caller holds hdev->lock */
55af49a8 3752void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3753{
3754 struct hci_conn_params *params, *tmp;
3755
3756 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3757 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3758 continue;
15819a70
AG
3759 list_del(&params->list);
3760 kfree(params);
3761 }
3762
55af49a8 3763 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3764}
3765
3766/* This function requires the caller holds hdev->lock */
373110c5 3767void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3768{
15819a70 3769 struct hci_conn_params *params, *tmp;
77a77a30 3770
f6c63249
JH
3771 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3772 hci_conn_params_free(params);
77a77a30 3773
a4790dbd 3774 hci_update_background_scan(hdev);
77a77a30 3775
15819a70 3776 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3777}
3778
4c87eaab 3779static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3780{
4c87eaab
AG
3781 if (status) {
3782 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3783
4c87eaab
AG
3784 hci_dev_lock(hdev);
3785 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3786 hci_dev_unlock(hdev);
3787 return;
3788 }
7ba8b4be
AG
3789}
3790
4c87eaab 3791static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3792{
4c87eaab
AG
3793 /* General inquiry access code (GIAC) */
3794 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3795 struct hci_request req;
3796 struct hci_cp_inquiry cp;
7ba8b4be
AG
3797 int err;
3798
4c87eaab
AG
3799 if (status) {
3800 BT_ERR("Failed to disable LE scanning: status %d", status);
3801 return;
3802 }
7ba8b4be 3803
4c87eaab
AG
3804 switch (hdev->discovery.type) {
3805 case DISCOV_TYPE_LE:
3806 hci_dev_lock(hdev);
3807 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3808 hci_dev_unlock(hdev);
3809 break;
7ba8b4be 3810
4c87eaab
AG
3811 case DISCOV_TYPE_INTERLEAVED:
3812 hci_req_init(&req, hdev);
7ba8b4be 3813
4c87eaab
AG
3814 memset(&cp, 0, sizeof(cp));
3815 memcpy(&cp.lap, lap, sizeof(cp.lap));
3816 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3817 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3818
4c87eaab 3819 hci_dev_lock(hdev);
7dbfac1d 3820
4c87eaab 3821 hci_inquiry_cache_flush(hdev);
7dbfac1d 3822
4c87eaab
AG
3823 err = hci_req_run(&req, inquiry_complete);
3824 if (err) {
3825 BT_ERR("Inquiry request failed: err %d", err);
3826 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3827 }
7dbfac1d 3828
4c87eaab
AG
3829 hci_dev_unlock(hdev);
3830 break;
7dbfac1d 3831 }
7dbfac1d
AG
3832}
3833
7ba8b4be
AG
3834static void le_scan_disable_work(struct work_struct *work)
3835{
3836 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3837 le_scan_disable.work);
4c87eaab
AG
3838 struct hci_request req;
3839 int err;
7ba8b4be
AG
3840
3841 BT_DBG("%s", hdev->name);
3842
4c87eaab 3843 hci_req_init(&req, hdev);
28b75a89 3844
b1efcc28 3845 hci_req_add_le_scan_disable(&req);
28b75a89 3846
4c87eaab
AG
3847 err = hci_req_run(&req, le_scan_disable_work_complete);
3848 if (err)
3849 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3850}
3851
8d97250e
JH
3852static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3853{
3854 struct hci_dev *hdev = req->hdev;
3855
3856 /* If we're advertising or initiating an LE connection we can't
3857 * go ahead and change the random address at this time. This is
3858 * because the eventual initiator address used for the
3859 * subsequently created connection will be undefined (some
3860 * controllers use the new address and others the one we had
3861 * when the operation started).
3862 *
3863 * In this kind of scenario skip the update and let the random
3864 * address be updated at the next cycle.
3865 */
5ce194c4 3866 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3867 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3868 BT_DBG("Deferring random address update");
9a783a13 3869 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3870 return;
3871 }
3872
3873 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3874}
3875
94b1fc92
MH
3876int hci_update_random_address(struct hci_request *req, bool require_privacy,
3877 u8 *own_addr_type)
ebd3a747
JH
3878{
3879 struct hci_dev *hdev = req->hdev;
3880 int err;
3881
3882 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3883 * the current RPA has expired or if something other than the
3884 * current RPA is in use, then generate a new one.
ebd3a747
JH
3885 */
3886 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3887 int to;
3888
3889 *own_addr_type = ADDR_LE_DEV_RANDOM;
3890
3891 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3892 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3893 return 0;
3894
defce9e8 3895 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3896 if (err < 0) {
3897 BT_ERR("%s failed to generate new RPA", hdev->name);
3898 return err;
3899 }
3900
8d97250e 3901 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3902
3903 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3904 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3905
3906 return 0;
94b1fc92
MH
3907 }
3908
3909 /* In case of required privacy without a resolvable private address,
3910 * use an unresolvable private address. This is useful for active
3911 * scanning and non-connectable advertising.
3912 */
3913 if (require_privacy) {
3914 bdaddr_t urpa;
3915
3916 get_random_bytes(&urpa, 6);
3917 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3918
3919 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3920 set_random_addr(req, &urpa);
94b1fc92 3921 return 0;
ebd3a747
JH
3922 }
3923
3924 /* If forcing static address is in use or there is no public
3925 * address, use the static address as the random address (but
3926 * skip the HCI command if the current random address is
3927 * already the static one).
3928 */
111902f7 3929 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3930 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
3932 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3933 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3934 &hdev->static_addr);
3935 return 0;
3936 }
3937
3938 /* Neither privacy nor static address is being used so use a
3939 * public address.
3940 */
3941 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3942
3943 return 0;
3944}
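
/* Illustrative sketch (not part of the original file): generating a
 * non-resolvable private address the same way as the require_privacy
 * branch above; the two most significant bits must be cleared. The
 * example_* name is hypothetical.
 */
static void example_make_nrpa(bdaddr_t *nrpa)
{
	get_random_bytes(nrpa, 6);
	nrpa->b[5] &= 0x3f; /* Clear two most significant bits */
}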
3945
a1f4c318
JH
3946/* Copy the Identity Address of the controller.
3947 *
3948 * If the controller has a public BD_ADDR, then by default use that one.
3949 * If this is an LE-only controller without a public address, default to
3950 * the static random address.
3951 *
3952 * For debugging purposes it is possible to force controllers with a
3953 * public address to use the static random address instead.
3954 */
3955void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3956 u8 *bdaddr_type)
3957{
111902f7 3958 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3959 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3960 bacpy(bdaddr, &hdev->static_addr);
3961 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3962 } else {
3963 bacpy(bdaddr, &hdev->bdaddr);
3964 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3965 }
3966}
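
/* Illustrative usage sketch (not part of the original file): reading
 * back the identity address, e.g. for debugging. The example_* name
 * is hypothetical.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}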
3967
9be0dab7
DH
3968/* Alloc HCI device */
3969struct hci_dev *hci_alloc_dev(void)
3970{
3971 struct hci_dev *hdev;
3972
27f70f3e 3973 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3974 if (!hdev)
3975 return NULL;
3976
b1b813d4
DH
3977 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3978 hdev->esco_type = (ESCO_HV1);
3979 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3980 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3981 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3982 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3983 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3984 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3985
b1b813d4
DH
3986 hdev->sniff_max_interval = 800;
3987 hdev->sniff_min_interval = 80;
3988
3f959d46 3989 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3990 hdev->le_adv_min_interval = 0x0800;
3991 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3992 hdev->le_scan_interval = 0x0060;
3993 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3994 hdev->le_conn_min_interval = 0x0028;
3995 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3996 hdev->le_conn_latency = 0x0000;
3997 hdev->le_supv_timeout = 0x002a;
bef64738 3998
d6bfd59c 3999 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 4000 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
4001 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4002 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4003
b1b813d4
DH
4004 mutex_init(&hdev->lock);
4005 mutex_init(&hdev->req_lock);
4006
4007 INIT_LIST_HEAD(&hdev->mgmt_pending);
4008 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4009 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4010 INIT_LIST_HEAD(&hdev->uuids);
4011 INIT_LIST_HEAD(&hdev->link_keys);
4012 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4013 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4014 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4015 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4016 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4017 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4018 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4019 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4020
4021 INIT_WORK(&hdev->rx_work, hci_rx_work);
4022 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4023 INIT_WORK(&hdev->tx_work, hci_tx_work);
4024 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4025
b1b813d4
DH
4026 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4027 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4028 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4029
b1b813d4
DH
4030 skb_queue_head_init(&hdev->rx_q);
4031 skb_queue_head_init(&hdev->cmd_q);
4032 skb_queue_head_init(&hdev->raw_q);
4033
4034 init_waitqueue_head(&hdev->req_wait_q);
4035
65cc2b49 4036 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4037
b1b813d4
DH
4038 hci_init_sysfs(hdev);
4039 discovery_init(hdev);
9be0dab7
DH
4040
4041 return hdev;
4042}
4043EXPORT_SYMBOL(hci_alloc_dev);
4044
4045/* Free HCI device */
4046void hci_free_dev(struct hci_dev *hdev)
4047{
9be0dab7
DH
4048 /* will free via device release */
4049 put_device(&hdev->dev);
4050}
4051EXPORT_SYMBOL(hci_free_dev);
4052
1da177e4
LT
4053/* Register HCI device */
4054int hci_register_dev(struct hci_dev *hdev)
4055{
b1b813d4 4056 int id, error;
1da177e4 4057
74292d5a 4058 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4059 return -EINVAL;
4060
08add513
MM
4061 /* Do not allow HCI_AMP devices to register at index 0,
4062 * so the index can be used as the AMP controller ID.
4063 */
3df92b31
SL
4064 switch (hdev->dev_type) {
4065 case HCI_BREDR:
4066 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4067 break;
4068 case HCI_AMP:
4069 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4070 break;
4071 default:
4072 return -EINVAL;
1da177e4 4073 }
8e87d142 4074
3df92b31
SL
4075 if (id < 0)
4076 return id;
4077
1da177e4
LT
4078 sprintf(hdev->name, "hci%d", id);
4079 hdev->id = id;
2d8b3a11
AE
4080
4081 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4082
d8537548
KC
4083 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4084 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4085 if (!hdev->workqueue) {
4086 error = -ENOMEM;
4087 goto err;
4088 }
f48fd9c8 4089
d8537548
KC
4090 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4091 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4092 if (!hdev->req_workqueue) {
4093 destroy_workqueue(hdev->workqueue);
4094 error = -ENOMEM;
4095 goto err;
4096 }
4097
0153e2ec
MH
4098 if (!IS_ERR_OR_NULL(bt_debugfs))
4099 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4100
bdc3e0f1
MH
4101 dev_set_name(&hdev->dev, "%s", hdev->name);
4102
4103 error = device_add(&hdev->dev);
33ca954d 4104 if (error < 0)
54506918 4105 goto err_wqueue;
1da177e4 4106
611b30f7 4107 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4108 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4109 hdev);
611b30f7
MH
4110 if (hdev->rfkill) {
4111 if (rfkill_register(hdev->rfkill) < 0) {
4112 rfkill_destroy(hdev->rfkill);
4113 hdev->rfkill = NULL;
4114 }
4115 }
4116
5e130367
JH
4117 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4118 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4119
a8b2d5c2 4120 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4121 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4122
01cd3404 4123 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4124 /* Assume BR/EDR support until proven otherwise (such as
4125 * through reading supported features during init.
4126 */
4127 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4128 }
ce2be9ac 4129
fcee3377
GP
4130 write_lock(&hci_dev_list_lock);
4131 list_add(&hdev->list, &hci_dev_list);
4132 write_unlock(&hci_dev_list_lock);
4133
4a964404
MH
4134 /* Devices that are marked for raw-only usage are unconfigured
4135 * and should not be included in normal operation.
fee746b0
MH
4136 */
4137 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4138 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4139
1da177e4 4140 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4141 hci_dev_hold(hdev);
1da177e4 4142
19202573 4143 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4144
1da177e4 4145 return id;
f48fd9c8 4146
33ca954d
DH
4147err_wqueue:
4148 destroy_workqueue(hdev->workqueue);
6ead1bbc 4149 destroy_workqueue(hdev->req_workqueue);
33ca954d 4150err:
3df92b31 4151 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4152
33ca954d 4153 return error;
1da177e4
LT
4154}
4155EXPORT_SYMBOL(hci_register_dev);
4156
4157/* Unregister HCI device */
59735631 4158void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4159{
3df92b31 4160 int i, id;
ef222013 4161
c13854ce 4162 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4163
94324962
JH
4164 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4165
3df92b31
SL
4166 id = hdev->id;
4167
f20d09d5 4168 write_lock(&hci_dev_list_lock);
1da177e4 4169 list_del(&hdev->list);
f20d09d5 4170 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4171
4172 hci_dev_do_close(hdev);
4173
cd4c5391 4174 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4175 kfree_skb(hdev->reassembly[i]);
4176
b9b5ef18
GP
4177 cancel_work_sync(&hdev->power_on);
4178
ab81cbf9 4179 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4180 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4181 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4182 hci_dev_lock(hdev);
744cf19e 4183 mgmt_index_removed(hdev);
09fd0de5 4184 hci_dev_unlock(hdev);
56e5cb86 4185 }
ab81cbf9 4186
2e58ef3e
JH
4187 /* mgmt_index_removed should take care of emptying the
4188 * pending list */
4189 BUG_ON(!list_empty(&hdev->mgmt_pending));
4190
1da177e4
LT
4191 hci_notify(hdev, HCI_DEV_UNREG);
4192
611b30f7
MH
4193 if (hdev->rfkill) {
4194 rfkill_unregister(hdev->rfkill);
4195 rfkill_destroy(hdev->rfkill);
4196 }
4197
711eafe3 4198 smp_unregister(hdev);
99780a7b 4199
bdc3e0f1 4200 device_del(&hdev->dev);
147e2d59 4201
0153e2ec
MH
4202 debugfs_remove_recursive(hdev->debugfs);
4203
f48fd9c8 4204 destroy_workqueue(hdev->workqueue);
6ead1bbc 4205 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4206
09fd0de5 4207 hci_dev_lock(hdev);
dcc36c16 4208 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4209 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4210 hci_uuids_clear(hdev);
55ed8ca1 4211 hci_link_keys_clear(hdev);
b899efaf 4212 hci_smp_ltks_clear(hdev);
970c4e46 4213 hci_smp_irks_clear(hdev);
2763eda6 4214 hci_remote_oob_data_clear(hdev);
dcc36c16 4215 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4216 hci_conn_params_clear_all(hdev);
09fd0de5 4217 hci_dev_unlock(hdev);
e2e0cacb 4218
dc946bd8 4219 hci_dev_put(hdev);
3df92b31
SL
4220
4221 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4222}
4223EXPORT_SYMBOL(hci_unregister_dev);
4224
4225/* Suspend HCI device */
4226int hci_suspend_dev(struct hci_dev *hdev)
4227{
4228 hci_notify(hdev, HCI_DEV_SUSPEND);
4229 return 0;
4230}
4231EXPORT_SYMBOL(hci_suspend_dev);
4232
4233/* Resume HCI device */
4234int hci_resume_dev(struct hci_dev *hdev)
4235{
4236 hci_notify(hdev, HCI_DEV_RESUME);
4237 return 0;
4238}
4239EXPORT_SYMBOL(hci_resume_dev);
4240
75e0569f
MH
4241/* Reset HCI device */
4242int hci_reset_dev(struct hci_dev *hdev)
4243{
4244 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4245 struct sk_buff *skb;
4246
4247 skb = bt_skb_alloc(3, GFP_ATOMIC);
4248 if (!skb)
4249 return -ENOMEM;
4250
4251 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4252 memcpy(skb_put(skb, 3), hw_err, 3);
4253
4254 /* Send Hardware Error to upper stack */
4255 return hci_recv_frame(hdev, skb);
4256}
4257EXPORT_SYMBOL(hci_reset_dev);
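
/* Illustrative usage sketch (not part of the original file): a driver
 * that detects an unrecoverable firmware state could inject a
 * hardware error event via hci_reset_dev() so the core and upper
 * layers reset their state. The example_* name is hypothetical.
 */
static void example_handle_fatal_firmware_error(struct hci_dev *hdev)
{
	int err = hci_reset_dev(hdev);

	if (err < 0)
		BT_ERR("%s failed to inject hardware error (%d)",
		       hdev->name, err);
}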
4258
76bca880 4259/* Receive frame from HCI drivers */
e1a26170 4260int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4261{
76bca880 4262 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4263 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4264 kfree_skb(skb);
4265 return -ENXIO;
4266 }
4267
d82603c6 4268 /* Incoming skb */
76bca880
MH
4269 bt_cb(skb)->incoming = 1;
4270
4271 /* Time stamp */
4272 __net_timestamp(skb);
4273
76bca880 4274 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4275 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4276
76bca880
MH
4277 return 0;
4278}
4279EXPORT_SYMBOL(hci_recv_frame);
4280
33e882a5 4281static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4282 int count, __u8 index)
33e882a5
SS
4283{
4284 int len = 0;
4285 int hlen = 0;
4286 int remain = count;
4287 struct sk_buff *skb;
4288 struct bt_skb_cb *scb;
4289
4290 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4291 index >= NUM_REASSEMBLY)
33e882a5
SS
4292 return -EILSEQ;
4293
4294 skb = hdev->reassembly[index];
4295
4296 if (!skb) {
4297 switch (type) {
4298 case HCI_ACLDATA_PKT:
4299 len = HCI_MAX_FRAME_SIZE;
4300 hlen = HCI_ACL_HDR_SIZE;
4301 break;
4302 case HCI_EVENT_PKT:
4303 len = HCI_MAX_EVENT_SIZE;
4304 hlen = HCI_EVENT_HDR_SIZE;
4305 break;
4306 case HCI_SCODATA_PKT:
4307 len = HCI_MAX_SCO_SIZE;
4308 hlen = HCI_SCO_HDR_SIZE;
4309 break;
4310 }
4311
1e429f38 4312 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4313 if (!skb)
4314 return -ENOMEM;
4315
4316 scb = (void *) skb->cb;
4317 scb->expect = hlen;
4318 scb->pkt_type = type;
4319
33e882a5
SS
4320 hdev->reassembly[index] = skb;
4321 }
4322
4323 while (count) {
4324 scb = (void *) skb->cb;
89bb46d0 4325 len = min_t(uint, scb->expect, count);
33e882a5
SS
4326
4327 memcpy(skb_put(skb, len), data, len);
4328
4329 count -= len;
4330 data += len;
4331 scb->expect -= len;
4332 remain = count;
4333
4334 switch (type) {
4335 case HCI_EVENT_PKT:
4336 if (skb->len == HCI_EVENT_HDR_SIZE) {
4337 struct hci_event_hdr *h = hci_event_hdr(skb);
4338 scb->expect = h->plen;
4339
4340 if (skb_tailroom(skb) < scb->expect) {
4341 kfree_skb(skb);
4342 hdev->reassembly[index] = NULL;
4343 return -ENOMEM;
4344 }
4345 }
4346 break;
4347
4348 case HCI_ACLDATA_PKT:
4349 if (skb->len == HCI_ACL_HDR_SIZE) {
4350 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4351 scb->expect = __le16_to_cpu(h->dlen);
4352
4353 if (skb_tailroom(skb) < scb->expect) {
4354 kfree_skb(skb);
4355 hdev->reassembly[index] = NULL;
4356 return -ENOMEM;
4357 }
4358 }
4359 break;
4360
4361 case HCI_SCODATA_PKT:
4362 if (skb->len == HCI_SCO_HDR_SIZE) {
4363 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4364 scb->expect = h->dlen;
4365
4366 if (skb_tailroom(skb) < scb->expect) {
4367 kfree_skb(skb);
4368 hdev->reassembly[index] = NULL;
4369 return -ENOMEM;
4370 }
4371 }
4372 break;
4373 }
4374
4375 if (scb->expect == 0) {
4376 /* Complete frame */
4377
4378 bt_cb(skb)->pkt_type = type;
e1a26170 4379 hci_recv_frame(hdev, skb);
33e882a5
SS
4380
4381 hdev->reassembly[index] = NULL;
4382 return remain;
4383 }
4384 }
4385
4386 return remain;
4387}
4388
99811510
SS
4389#define STREAM_REASSEMBLY 0
4390
4391int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4392{
4393 int type;
4394 int rem = 0;
4395
da5f6c37 4396 while (count) {
99811510
SS
4397 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4398
4399 if (!skb) {
4400 struct { char type; } *pkt;
4401
4402 /* Start of the frame */
4403 pkt = data;
4404 type = pkt->type;
4405
4406 data++;
4407 count--;
4408 } else
4409 type = bt_cb(skb)->pkt_type;
4410
1e429f38 4411 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4412 STREAM_REASSEMBLY);
99811510
SS
4413 if (rem < 0)
4414 return rem;
4415
4416 data += (count - rem);
4417 count = rem;
f81c6224 4418 }
99811510
SS
4419
4420 return rem;
4421}
4422EXPORT_SYMBOL(hci_recv_stream_fragment);
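
/* Illustrative usage sketch (not part of the original file): a UART
 * style driver feeds raw bytes into the stream reassembler, where the
 * first byte of each frame carries the packet type. The example_*
 * name is hypothetical.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, buf, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}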
4423
1da177e4
LT
4424/* ---- Interface to upper protocols ---- */
4425
1da177e4
LT
4426int hci_register_cb(struct hci_cb *cb)
4427{
4428 BT_DBG("%p name %s", cb, cb->name);
4429
f20d09d5 4430 write_lock(&hci_cb_list_lock);
1da177e4 4431 list_add(&cb->list, &hci_cb_list);
f20d09d5 4432 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4433
4434 return 0;
4435}
4436EXPORT_SYMBOL(hci_register_cb);
4437
4438int hci_unregister_cb(struct hci_cb *cb)
4439{
4440 BT_DBG("%p name %s", cb, cb->name);
4441
f20d09d5 4442 write_lock(&hci_cb_list_lock);
1da177e4 4443 list_del(&cb->list);
f20d09d5 4444 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4445
4446 return 0;
4447}
4448EXPORT_SYMBOL(hci_unregister_cb);
4449
51086991 4450static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4451{
cdc52faa
MH
4452 int err;
4453
0d48d939 4454 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4455
cd82e61c
MH
4456 /* Time stamp */
4457 __net_timestamp(skb);
1da177e4 4458
cd82e61c
MH
4459 /* Send copy to monitor */
4460 hci_send_to_monitor(hdev, skb);
4461
4462 if (atomic_read(&hdev->promisc)) {
4463 /* Send copy to the sockets */
470fe1b5 4464 hci_send_to_sock(hdev, skb);
1da177e4
LT
4465 }
4466
4467 /* Get rid of skb owner, prior to sending to the driver. */
4468 skb_orphan(skb);
4469
cdc52faa
MH
4470 err = hdev->send(hdev, skb);
4471 if (err < 0) {
4472 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4473 kfree_skb(skb);
4474 }
1da177e4
LT
4475}
4476
3119ae95
JH
4477void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4478{
4479 skb_queue_head_init(&req->cmd_q);
4480 req->hdev = hdev;
5d73e034 4481 req->err = 0;
3119ae95
JH
4482}
4483
4484int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4485{
4486 struct hci_dev *hdev = req->hdev;
4487 struct sk_buff *skb;
4488 unsigned long flags;
4489
4490 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4491
49c922bb 4492 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4493 * commands queued on the HCI request queue.
4494 */
4495 if (req->err) {
4496 skb_queue_purge(&req->cmd_q);
4497 return req->err;
4498 }
4499
3119ae95
JH
4500 /* Do not allow empty requests */
4501 if (skb_queue_empty(&req->cmd_q))
382b0c39 4502 return -ENODATA;
3119ae95
JH
4503
4504 skb = skb_peek_tail(&req->cmd_q);
4505 bt_cb(skb)->req.complete = complete;
4506
4507 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4508 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4509 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4510
4511 queue_work(hdev->workqueue, &hdev->cmd_work);
4512
4513 return 0;
4514}
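
/* Illustrative usage sketch (not part of the original file): the
 * typical pattern for the asynchronous request API is init, queue one
 * or more commands and run the request with a completion callback.
 * The example_* names are hypothetical.
 */
static void example_reset_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s reset done, status 0x%2.2x", hdev->name, status);
}

static int example_send_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_reset_complete);
}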
4515
899de765
MH
4516bool hci_req_pending(struct hci_dev *hdev)
4517{
4518 return (hdev->req_status == HCI_REQ_PEND);
4519}
4520
1ca3a9d0 4521static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4522 u32 plen, const void *param)
1da177e4
LT
4523{
4524 int len = HCI_COMMAND_HDR_SIZE + plen;
4525 struct hci_command_hdr *hdr;
4526 struct sk_buff *skb;
4527
1da177e4 4528 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4529 if (!skb)
4530 return NULL;
1da177e4
LT
4531
4532 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4533 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4534 hdr->plen = plen;
4535
4536 if (plen)
4537 memcpy(skb_put(skb, plen), param, plen);
4538
4539 BT_DBG("skb len %d", skb->len);
4540
0d48d939 4541 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4542 bt_cb(skb)->opcode = opcode;
c78ae283 4543
1ca3a9d0
JH
4544 return skb;
4545}
4546
4547/* Send HCI command */
07dc93dd
JH
4548int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4549 const void *param)
1ca3a9d0
JH
4550{
4551 struct sk_buff *skb;
4552
4553 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4554
4555 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4556 if (!skb) {
4557 BT_ERR("%s no memory for command", hdev->name);
4558 return -ENOMEM;
4559 }
4560
49c922bb 4561 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4562 * single-command requests.
4563 */
4564 bt_cb(skb)->req.start = true;
4565
1da177e4 4566 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4567 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4568
4569 return 0;
4570}
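
/* Illustrative usage sketch (not part of the original file): sending
 * a single stand-alone command. The vendor specific opcode 0xfc01 and
 * the example_* name are made-up placeholders.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 enable = 0x01;

	return hci_send_cmd(hdev, 0xfc01, sizeof(enable), &enable);
}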
1da177e4 4571
71c76a17 4572/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4573void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4574 const void *param, u8 event)
71c76a17
JH
4575{
4576 struct hci_dev *hdev = req->hdev;
4577 struct sk_buff *skb;
4578
4579 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4580
49c922bb 4581 /* If an error occurred during request building, there is no point in
34739c1e
AG
4582 * queueing the HCI command. We can simply return.
4583 */
4584 if (req->err)
4585 return;
4586
71c76a17
JH
4587 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4588 if (!skb) {
5d73e034
AG
4589 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4590 hdev->name, opcode);
4591 req->err = -ENOMEM;
e348fe6b 4592 return;
71c76a17
JH
4593 }
4594
4595 if (skb_queue_empty(&req->cmd_q))
4596 bt_cb(skb)->req.start = true;
4597
02350a72
JH
4598 bt_cb(skb)->req.event = event;
4599
71c76a17 4600 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4601}
4602
07dc93dd
JH
4603void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4604 const void *param)
02350a72
JH
4605{
4606 hci_req_add_ev(req, opcode, plen, param, 0);
4607}
4608
1da177e4 4609/* Get data from the previously sent command */
a9de9248 4610void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4611{
4612 struct hci_command_hdr *hdr;
4613
4614 if (!hdev->sent_cmd)
4615 return NULL;
4616
4617 hdr = (void *) hdev->sent_cmd->data;
4618
a9de9248 4619 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4620 return NULL;
4621
f0e09510 4622 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4623
4624 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4625}
4626
4627/* Send ACL data */
4628static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4629{
4630 struct hci_acl_hdr *hdr;
4631 int len = skb->len;
4632
badff6d0
ACM
4633 skb_push(skb, HCI_ACL_HDR_SIZE);
4634 skb_reset_transport_header(skb);
9c70220b 4635 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4636 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4637 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4638}
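
/* Worked example (illustrative, not part of the original file): the
 * connection handle occupies the low 12 bits and the packet
 * boundary/broadcast flags the high 4 bits, so handle 0x002a with
 * ACL_START (0x02) packs to 0x202a before the cpu_to_le16()
 * conversion above. The example_* name is hypothetical.
 */
static u16 example_pack_acl_handle(void)
{
	return hci_handle_pack(0x002a, ACL_START);
}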
4639
ee22be7e 4640static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4641 struct sk_buff *skb, __u16 flags)
1da177e4 4642{
ee22be7e 4643 struct hci_conn *conn = chan->conn;
1da177e4
LT
4644 struct hci_dev *hdev = conn->hdev;
4645 struct sk_buff *list;
4646
087bfd99
GP
4647 skb->len = skb_headlen(skb);
4648 skb->data_len = 0;
4649
4650 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4651
4652 switch (hdev->dev_type) {
4653 case HCI_BREDR:
4654 hci_add_acl_hdr(skb, conn->handle, flags);
4655 break;
4656 case HCI_AMP:
4657 hci_add_acl_hdr(skb, chan->handle, flags);
4658 break;
4659 default:
4660 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4661 return;
4662 }
087bfd99 4663
70f23020
AE
4664 list = skb_shinfo(skb)->frag_list;
4665 if (!list) {
1da177e4
LT
4666 /* Non fragmented */
4667 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4668
73d80deb 4669 skb_queue_tail(queue, skb);
1da177e4
LT
4670 } else {
4671 /* Fragmented */
4672 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4673
4674 skb_shinfo(skb)->frag_list = NULL;
4675
9cfd5a23
JR
4676 /* Queue all fragments atomically. We need to use spin_lock_bh
4677 * here because with 6LoWPAN links this function can be
4678 * called from softirq context, and using a normal spin lock
4679 * could cause deadlocks.
4680 */
4681 spin_lock_bh(&queue->lock);
1da177e4 4682
73d80deb 4683 __skb_queue_tail(queue, skb);
e702112f
AE
4684
4685 flags &= ~ACL_START;
4686 flags |= ACL_CONT;
1da177e4
LT
4687 do {
4688 skb = list; list = list->next;
8e87d142 4689
0d48d939 4690 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4691 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4692
4693 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4694
73d80deb 4695 __skb_queue_tail(queue, skb);
1da177e4
LT
4696 } while (list);
4697
9cfd5a23 4698 spin_unlock_bh(&queue->lock);
1da177e4 4699 }
73d80deb
LAD
4700}
4701
4702void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4703{
ee22be7e 4704 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4705
f0e09510 4706 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4707
ee22be7e 4708 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4709
3eff45ea 4710 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4711}
1da177e4
LT
4712
4713/* Send SCO data */
0d861d8b 4714void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4715{
4716 struct hci_dev *hdev = conn->hdev;
4717 struct hci_sco_hdr hdr;
4718
4719 BT_DBG("%s len %d", hdev->name, skb->len);
4720
aca3192c 4721 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4722 hdr.dlen = skb->len;
4723
badff6d0
ACM
4724 skb_push(skb, HCI_SCO_HDR_SIZE);
4725 skb_reset_transport_header(skb);
9c70220b 4726 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4727
0d48d939 4728 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4729
1da177e4 4730 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4731 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4732}
1da177e4
LT
4733
4734/* ---- HCI TX task (outgoing data) ---- */
4735
4736/* HCI Connection scheduler */
6039aa73
GP
4737static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4738 int *quote)
1da177e4
LT
4739{
4740 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4741 struct hci_conn *conn = NULL, *c;
abc5de8f 4742 unsigned int num = 0, min = ~0;
1da177e4 4743
8e87d142 4744 /* We don't have to lock the device here. Connections are always
1da177e4 4745 * added and removed with TX task disabled. */
bf4c6325
GP
4746
4747 rcu_read_lock();
4748
4749 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4750 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4751 continue;
769be974
MH
4752
4753 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4754 continue;
4755
1da177e4
LT
4756 num++;
4757
4758 if (c->sent < min) {
4759 min = c->sent;
4760 conn = c;
4761 }
52087a79
LAD
4762
4763 if (hci_conn_num(hdev, type) == num)
4764 break;
1da177e4
LT
4765 }
4766
bf4c6325
GP
4767 rcu_read_unlock();
4768
1da177e4 4769 if (conn) {
6ed58ec5
VT
4770 int cnt, q;
4771
4772 switch (conn->type) {
4773 case ACL_LINK:
4774 cnt = hdev->acl_cnt;
4775 break;
4776 case SCO_LINK:
4777 case ESCO_LINK:
4778 cnt = hdev->sco_cnt;
4779 break;
4780 case LE_LINK:
4781 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4782 break;
4783 default:
4784 cnt = 0;
4785 BT_ERR("Unknown link type");
4786 }
4787
4788 q = cnt / num;
1da177e4
LT
4789 *quote = q ? q : 1;
4790 } else
4791 *quote = 0;
4792
4793 BT_DBG("conn %p quote %d", conn, *quote);
4794 return conn;
4795}
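
/* Worked example (illustrative, not part of the original file): the
 * fair share computed above for e.g. cnt == 8 free buffers and
 * num == 3 busy connections is 8 / 3 = 2 packets per round, and a
 * non-empty queue always gets a quote of at least 1. The example_*
 * name is hypothetical.
 */
static inline int example_quote(int cnt, int num)
{
	int q = cnt / num;

	return q ? q : 1;
}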
4796
6039aa73 4797static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4798{
4799 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4800 struct hci_conn *c;
1da177e4 4801
bae1f5d9 4802 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4803
bf4c6325
GP
4804 rcu_read_lock();
4805
1da177e4 4806 /* Kill stalled connections */
bf4c6325 4807 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4808 if (c->type == type && c->sent) {
6ed93dc6
AE
4809 BT_ERR("%s killing stalled connection %pMR",
4810 hdev->name, &c->dst);
bed71748 4811 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4812 }
4813 }
bf4c6325
GP
4814
4815 rcu_read_unlock();
1da177e4
LT
4816}
4817
6039aa73
GP
4818static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4819 int *quote)
1da177e4 4820{
73d80deb
LAD
4821 struct hci_conn_hash *h = &hdev->conn_hash;
4822 struct hci_chan *chan = NULL;
abc5de8f 4823 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4824 struct hci_conn *conn;
73d80deb
LAD
4825 int cnt, q, conn_num = 0;
4826
4827 BT_DBG("%s", hdev->name);
4828
bf4c6325
GP
4829 rcu_read_lock();
4830
4831 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4832 struct hci_chan *tmp;
4833
4834 if (conn->type != type)
4835 continue;
4836
4837 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4838 continue;
4839
4840 conn_num++;
4841
8192edef 4842 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4843 struct sk_buff *skb;
4844
4845 if (skb_queue_empty(&tmp->data_q))
4846 continue;
4847
4848 skb = skb_peek(&tmp->data_q);
4849 if (skb->priority < cur_prio)
4850 continue;
4851
4852 if (skb->priority > cur_prio) {
4853 num = 0;
4854 min = ~0;
4855 cur_prio = skb->priority;
4856 }
4857
4858 num++;
4859
4860 if (conn->sent < min) {
4861 min = conn->sent;
4862 chan = tmp;
4863 }
4864 }
4865
4866 if (hci_conn_num(hdev, type) == conn_num)
4867 break;
4868 }
4869
bf4c6325
GP
4870 rcu_read_unlock();
4871
73d80deb
LAD
4872 if (!chan)
4873 return NULL;
4874
4875 switch (chan->conn->type) {
4876 case ACL_LINK:
4877 cnt = hdev->acl_cnt;
4878 break;
bd1eb66b
AE
4879 case AMP_LINK:
4880 cnt = hdev->block_cnt;
4881 break;
73d80deb
LAD
4882 case SCO_LINK:
4883 case ESCO_LINK:
4884 cnt = hdev->sco_cnt;
4885 break;
4886 case LE_LINK:
4887 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4888 break;
4889 default:
4890 cnt = 0;
4891 BT_ERR("Unknown link type");
4892 }
4893
4894 q = cnt / num;
4895 *quote = q ? q : 1;
4896 BT_DBG("chan %p quote %d", chan, *quote);
4897 return chan;
4898}
4899
02b20f0b
LAD
4900static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4901{
4902 struct hci_conn_hash *h = &hdev->conn_hash;
4903 struct hci_conn *conn;
4904 int num = 0;
4905
4906 BT_DBG("%s", hdev->name);
4907
bf4c6325
GP
4908 rcu_read_lock();
4909
4910 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4911 struct hci_chan *chan;
4912
4913 if (conn->type != type)
4914 continue;
4915
4916 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4917 continue;
4918
4919 num++;
4920
8192edef 4921 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4922 struct sk_buff *skb;
4923
4924 if (chan->sent) {
4925 chan->sent = 0;
4926 continue;
4927 }
4928
4929 if (skb_queue_empty(&chan->data_q))
4930 continue;
4931
4932 skb = skb_peek(&chan->data_q);
4933 if (skb->priority >= HCI_PRIO_MAX - 1)
4934 continue;
4935
4936 skb->priority = HCI_PRIO_MAX - 1;
4937
4938 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4939 skb->priority);
02b20f0b
LAD
4940 }
4941
4942 if (hci_conn_num(hdev, type) == num)
4943 break;
4944 }
bf4c6325
GP
4945
4946 rcu_read_unlock();
4947
02b20f0b
LAD
4948}
4949
b71d385a
AE
4950static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4951{
4952 /* Calculate count of blocks used by this packet */
4953 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4954}
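
/* Worked example (illustrative, not part of the original file): a
 * 1004 byte ACL skb (4 byte header plus 1000 bytes of payload) on a
 * controller with 339 byte blocks consumes DIV_ROUND_UP(1000, 339),
 * i.e. 3 blocks. The example_* name is hypothetical.
 */
static inline int example_blocks_for_skb(unsigned int skb_len,
					 unsigned int block_len)
{
	return DIV_ROUND_UP(skb_len - HCI_ACL_HDR_SIZE, block_len);
}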
4955
6039aa73 4956static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4957{
4a964404 4958 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4959 /* ACL tx timeout must be longer than maximum
4960 * link supervision timeout (40.9 seconds) */
63d2bc1b 4961 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4962 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4963 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4964 }
63d2bc1b 4965}
1da177e4 4966
6039aa73 4967static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4968{
4969 unsigned int cnt = hdev->acl_cnt;
4970 struct hci_chan *chan;
4971 struct sk_buff *skb;
4972 int quote;
4973
4974 __check_timeout(hdev, cnt);
04837f64 4975
73d80deb 4976 while (hdev->acl_cnt &&
a8c5fb1a 4977 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4978 u32 priority = (skb_peek(&chan->data_q))->priority;
4979 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4980 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4981 skb->len, skb->priority);
73d80deb 4982
ec1cce24
LAD
4983 /* Stop if priority has changed */
4984 if (skb->priority < priority)
4985 break;
4986
4987 skb = skb_dequeue(&chan->data_q);
4988
73d80deb 4989 hci_conn_enter_active_mode(chan->conn,
04124681 4990 bt_cb(skb)->force_active);
04837f64 4991
57d17d70 4992 hci_send_frame(hdev, skb);
1da177e4
LT
4993 hdev->acl_last_tx = jiffies;
4994
4995 hdev->acl_cnt--;
73d80deb
LAD
4996 chan->sent++;
4997 chan->conn->sent++;
1da177e4
LT
4998 }
4999 }
02b20f0b
LAD
5000
5001 if (cnt != hdev->acl_cnt)
5002 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5003}
5004
6039aa73 5005static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5006{
63d2bc1b 5007 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5008 struct hci_chan *chan;
5009 struct sk_buff *skb;
5010 int quote;
bd1eb66b 5011 u8 type;
b71d385a 5012
63d2bc1b 5013 __check_timeout(hdev, cnt);
b71d385a 5014
bd1eb66b
AE
5015 BT_DBG("%s", hdev->name);
5016
5017 if (hdev->dev_type == HCI_AMP)
5018 type = AMP_LINK;
5019 else
5020 type = ACL_LINK;
5021
b71d385a 5022 while (hdev->block_cnt > 0 &&
bd1eb66b 5023 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5024 u32 priority = (skb_peek(&chan->data_q))->priority;
5025 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5026 int blocks;
5027
5028 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5029 skb->len, skb->priority);
b71d385a
AE
5030
5031 /* Stop if priority has changed */
5032 if (skb->priority < priority)
5033 break;
5034
5035 skb = skb_dequeue(&chan->data_q);
5036
5037 blocks = __get_blocks(hdev, skb);
5038 if (blocks > hdev->block_cnt)
5039 return;
5040
5041 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5042 bt_cb(skb)->force_active);
b71d385a 5043
57d17d70 5044 hci_send_frame(hdev, skb);
b71d385a
AE
5045 hdev->acl_last_tx = jiffies;
5046
5047 hdev->block_cnt -= blocks;
5048 quote -= blocks;
5049
5050 chan->sent += blocks;
5051 chan->conn->sent += blocks;
5052 }
5053 }
5054
5055 if (cnt != hdev->block_cnt)
bd1eb66b 5056 hci_prio_recalculate(hdev, type);
b71d385a
AE
5057}
5058
6039aa73 5059static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5060{
5061 BT_DBG("%s", hdev->name);
5062
bd1eb66b
AE
5063 /* No ACL link over BR/EDR controller */
5064 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5065 return;
5066
5067 /* No AMP link over AMP controller */
5068 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5069 return;
5070
5071 switch (hdev->flow_ctl_mode) {
5072 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5073 hci_sched_acl_pkt(hdev);
5074 break;
5075
5076 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5077 hci_sched_acl_blk(hdev);
5078 break;
5079 }
5080}
5081
1da177e4 5082/* Schedule SCO */
6039aa73 5083static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5084{
5085 struct hci_conn *conn;
5086 struct sk_buff *skb;
5087 int quote;
5088
5089 BT_DBG("%s", hdev->name);
5090
52087a79
LAD
5091 if (!hci_conn_num(hdev, SCO_LINK))
5092 return;
5093
1da177e4
LT
5094 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5095 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5096 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5097 hci_send_frame(hdev, skb);
1da177e4
LT
5098
5099 conn->sent++;
5100 if (conn->sent == ~0)
5101 conn->sent = 0;
5102 }
5103 }
5104}
5105
6039aa73 5106static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5107{
5108 struct hci_conn *conn;
5109 struct sk_buff *skb;
5110 int quote;
5111
5112 BT_DBG("%s", hdev->name);
5113
52087a79
LAD
5114 if (!hci_conn_num(hdev, ESCO_LINK))
5115 return;
5116
8fc9ced3
GP
5117 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5118 &quote))) {
b6a0dc82
MH
5119 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5120 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5121 hci_send_frame(hdev, skb);
b6a0dc82
MH
5122
5123 conn->sent++;
5124 if (conn->sent == ~0)
5125 conn->sent = 0;
5126 }
5127 }
5128}
5129
6039aa73 5130static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5131{
73d80deb 5132 struct hci_chan *chan;
6ed58ec5 5133 struct sk_buff *skb;
02b20f0b 5134 int quote, cnt, tmp;
6ed58ec5
VT
5135
5136 BT_DBG("%s", hdev->name);
5137
52087a79
LAD
5138 if (!hci_conn_num(hdev, LE_LINK))
5139 return;
5140
4a964404 5141 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5142 /* LE tx timeout must be longer than maximum
5143 * link supervision timeout (40.9 seconds) */
bae1f5d9 5144 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5145 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5146 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5147 }
5148
5149 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5150 tmp = cnt;
73d80deb 5151 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5152 u32 priority = (skb_peek(&chan->data_q))->priority;
5153 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5154 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5155 skb->len, skb->priority);
6ed58ec5 5156
ec1cce24
LAD
5157 /* Stop if priority has changed */
5158 if (skb->priority < priority)
5159 break;
5160
5161 skb = skb_dequeue(&chan->data_q);
5162
57d17d70 5163 hci_send_frame(hdev, skb);
6ed58ec5
VT
5164 hdev->le_last_tx = jiffies;
5165
5166 cnt--;
73d80deb
LAD
5167 chan->sent++;
5168 chan->conn->sent++;
6ed58ec5
VT
5169 }
5170 }
73d80deb 5171
6ed58ec5
VT
5172 if (hdev->le_pkts)
5173 hdev->le_cnt = cnt;
5174 else
5175 hdev->acl_cnt = cnt;
02b20f0b
LAD
5176
5177 if (cnt != tmp)
5178 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5179}
5180
3eff45ea 5181static void hci_tx_work(struct work_struct *work)
1da177e4 5182{
3eff45ea 5183 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5184 struct sk_buff *skb;
5185
6ed58ec5 5186 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5187 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5188
52de599e
MH
5189 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5190 /* Schedule queues and send stuff to HCI driver */
5191 hci_sched_acl(hdev);
5192 hci_sched_sco(hdev);
5193 hci_sched_esco(hdev);
5194 hci_sched_le(hdev);
5195 }
6ed58ec5 5196
1da177e4
LT
5197 /* Send next queued raw (unknown type) packet */
5198 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5199 hci_send_frame(hdev, skb);
1da177e4
LT
5200}
5201
25985edc 5202/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5203
5204/* ACL data packet */
6039aa73 5205static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5206{
5207 struct hci_acl_hdr *hdr = (void *) skb->data;
5208 struct hci_conn *conn;
5209 __u16 handle, flags;
5210
5211 skb_pull(skb, HCI_ACL_HDR_SIZE);
5212
5213 handle = __le16_to_cpu(hdr->handle);
5214 flags = hci_flags(handle);
5215 handle = hci_handle(handle);
5216
f0e09510 5217 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5218 handle, flags);
1da177e4
LT
5219
5220 hdev->stat.acl_rx++;
5221
5222 hci_dev_lock(hdev);
5223 conn = hci_conn_hash_lookup_handle(hdev, handle);
5224 hci_dev_unlock(hdev);
8e87d142 5225
1da177e4 5226 if (conn) {
65983fc7 5227 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5228
1da177e4 5229 /* Send to upper protocol */
686ebf28
UF
5230 l2cap_recv_acldata(conn, skb, flags);
5231 return;
1da177e4 5232 } else {
8e87d142 5233 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5234 hdev->name, handle);
1da177e4
LT
5235 }
5236
5237 kfree_skb(skb);
5238}
5239
5240/* SCO data packet */
6039aa73 5241static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5242{
5243 struct hci_sco_hdr *hdr = (void *) skb->data;
5244 struct hci_conn *conn;
5245 __u16 handle;
5246
5247 skb_pull(skb, HCI_SCO_HDR_SIZE);
5248
5249 handle = __le16_to_cpu(hdr->handle);
5250
f0e09510 5251 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5252
5253 hdev->stat.sco_rx++;
5254
5255 hci_dev_lock(hdev);
5256 conn = hci_conn_hash_lookup_handle(hdev, handle);
5257 hci_dev_unlock(hdev);
5258
5259 if (conn) {
1da177e4 5260 /* Send to upper protocol */
686ebf28
UF
5261 sco_recv_scodata(conn, skb);
5262 return;
1da177e4 5263 } else {
8e87d142 5264 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5265 hdev->name, handle);
1da177e4
LT
5266 }
5267
5268 kfree_skb(skb);
5269}
5270
9238f36a
JH
5271static bool hci_req_is_complete(struct hci_dev *hdev)
5272{
5273 struct sk_buff *skb;
5274
5275 skb = skb_peek(&hdev->cmd_q);
5276 if (!skb)
5277 return true;
5278
5279 return bt_cb(skb)->req.start;
5280}
5281
42c6b129
JH
5282static void hci_resend_last(struct hci_dev *hdev)
5283{
5284 struct hci_command_hdr *sent;
5285 struct sk_buff *skb;
5286 u16 opcode;
5287
5288 if (!hdev->sent_cmd)
5289 return;
5290
5291 sent = (void *) hdev->sent_cmd->data;
5292 opcode = __le16_to_cpu(sent->opcode);
5293 if (opcode == HCI_OP_RESET)
5294 return;
5295
5296 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5297 if (!skb)
5298 return;
5299
5300 skb_queue_head(&hdev->cmd_q, skb);
5301 queue_work(hdev->workqueue, &hdev->cmd_work);
5302}
5303
9238f36a
JH
5304void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5305{
5306 hci_req_complete_t req_complete = NULL;
5307 struct sk_buff *skb;
5308 unsigned long flags;
5309
5310 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5311
42c6b129
JH
5312 /* If the completed command doesn't match the last one that was
5313 * sent we need to do special handling of it.
9238f36a 5314 */
42c6b129
JH
5315 if (!hci_sent_cmd_data(hdev, opcode)) {
5316 /* Some CSR based controllers generate a spontaneous
5317 * reset complete event during init and any pending
5318 * command will never be completed. In such a case we
5319 * need to resend whatever was the last sent
5320 * command.
5321 */
5322 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5323 hci_resend_last(hdev);
5324
9238f36a 5325 return;
42c6b129 5326 }
9238f36a
JH
5327
5328 /* If the command succeeded and there's still more commands in
5329 * this request the request is not yet complete.
5330 */
5331 if (!status && !hci_req_is_complete(hdev))
5332 return;
5333
5334 /* If this was the last command in a request the complete
5335 * callback would be found in hdev->sent_cmd instead of the
5336 * command queue (hdev->cmd_q).
5337 */
5338 if (hdev->sent_cmd) {
5339 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5340
5341 if (req_complete) {
5342 /* We must set the complete callback to NULL to
5343 * avoid calling the callback more than once if
5344 * this function gets called again.
5345 */
5346 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5347
9238f36a 5348 goto call_complete;
53e21fbc 5349 }
9238f36a
JH
5350 }
5351
5352 /* Remove all pending commands belonging to this request */
5353 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5354 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5355 if (bt_cb(skb)->req.start) {
5356 __skb_queue_head(&hdev->cmd_q, skb);
5357 break;
5358 }
5359
5360 req_complete = bt_cb(skb)->req.complete;
5361 kfree_skb(skb);
5362 }
5363 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5364
5365call_complete:
5366 if (req_complete)
5367 req_complete(hdev, status);
5368}
5369
b78752cc 5370static void hci_rx_work(struct work_struct *work)
1da177e4 5371{
b78752cc 5372 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5373 struct sk_buff *skb;
5374
5375 BT_DBG("%s", hdev->name);
5376
1da177e4 5377 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5378 /* Send copy to monitor */
5379 hci_send_to_monitor(hdev, skb);
5380
1da177e4
LT
5381 if (atomic_read(&hdev->promisc)) {
5382 /* Send copy to the sockets */
470fe1b5 5383 hci_send_to_sock(hdev, skb);
1da177e4
LT
5384 }
5385
fee746b0 5386 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5387 kfree_skb(skb);
5388 continue;
5389 }
5390
5391 if (test_bit(HCI_INIT, &hdev->flags)) {
5392 /* Don't process data packets in these states. */
0d48d939 5393 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5394 case HCI_ACLDATA_PKT:
5395 case HCI_SCODATA_PKT:
5396 kfree_skb(skb);
5397 continue;
3ff50b79 5398 }
1da177e4
LT
5399 }
5400
5401 /* Process frame */
0d48d939 5402 switch (bt_cb(skb)->pkt_type) {
1da177e4 5403 case HCI_EVENT_PKT:
b78752cc 5404 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5405 hci_event_packet(hdev, skb);
5406 break;
5407
5408 case HCI_ACLDATA_PKT:
5409 BT_DBG("%s ACL data packet", hdev->name);
5410 hci_acldata_packet(hdev, skb);
5411 break;
5412
5413 case HCI_SCODATA_PKT:
5414 BT_DBG("%s SCO data packet", hdev->name);
5415 hci_scodata_packet(hdev, skb);
5416 break;
5417
5418 default:
5419 kfree_skb(skb);
5420 break;
5421 }
5422 }
1da177e4
LT
5423}
5424
c347b765 5425static void hci_cmd_work(struct work_struct *work)
1da177e4 5426{
c347b765 5427 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5428 struct sk_buff *skb;
5429
2104786b
AE
5430 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5431 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5432
1da177e4 5433 /* Send queued commands */
5a08ecce
AE
5434 if (atomic_read(&hdev->cmd_cnt)) {
5435 skb = skb_dequeue(&hdev->cmd_q);
5436 if (!skb)
5437 return;
5438
7585b97a 5439 kfree_skb(hdev->sent_cmd);
1da177e4 5440
a675d7f1 5441 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5442 if (hdev->sent_cmd) {
1da177e4 5443 atomic_dec(&hdev->cmd_cnt);
57d17d70 5444 hci_send_frame(hdev, skb);
7bdb8a5c 5445 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5446 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5447 else
65cc2b49
MH
5448 schedule_delayed_work(&hdev->cmd_timer,
5449 HCI_CMD_TIMEOUT);
1da177e4
LT
5450 } else {
5451 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5452 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5453 }
5454 }
5455}
b1efcc28
AG
5456
5457void hci_req_add_le_scan_disable(struct hci_request *req)
5458{
5459 struct hci_cp_le_set_scan_enable cp;
5460
5461 memset(&cp, 0, sizeof(cp));
5462 cp.enable = LE_SCAN_DISABLE;
5463 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5464}
a4790dbd 5465
8540f6c0
MH
5466static void add_to_white_list(struct hci_request *req,
5467 struct hci_conn_params *params)
5468{
5469 struct hci_cp_le_add_to_white_list cp;
5470
5471 cp.bdaddr_type = params->addr_type;
5472 bacpy(&cp.bdaddr, &params->addr);
5473
5474 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5475}
5476
5477static u8 update_white_list(struct hci_request *req)
5478{
5479 struct hci_dev *hdev = req->hdev;
5480 struct hci_conn_params *params;
5481 struct bdaddr_list *b;
5482 uint8_t white_list_entries = 0;
5483
5484 /* Go through the current white list programmed into the
5485 * controller one by one and check if that address is still
5486 * in the list of pending connections or list of devices to
5487 * report. If not present in either list, then queue the
5488 * command to remove it from the controller.
5489 */
5490 list_for_each_entry(b, &hdev->le_white_list, list) {
5491 struct hci_cp_le_del_from_white_list cp;
5492
5493 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5494 &b->bdaddr, b->bdaddr_type) ||
5495 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5496 &b->bdaddr, b->bdaddr_type)) {
5497 white_list_entries++;
5498 continue;
5499 }
5500
5501 cp.bdaddr_type = b->bdaddr_type;
5502 bacpy(&cp.bdaddr, &b->bdaddr);
5503
5504 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5505 sizeof(cp), &cp);
5506 }
5507
5508 /* Since all no longer valid white list entries have been
5509 * removed, walk through the list of pending connections
5510 * and ensure that any new device gets programmed into
5511 * the controller.
5512 *
5513 * If the list of devices is larger than the number of
5514 * available white list entries in the controller, then
5515 * just abort and return the filter policy value that does
5516 * not use the white list.
5517 */
5518 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5519 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5520 &params->addr, params->addr_type))
5521 continue;
5522
5523 if (white_list_entries >= hdev->le_white_list_size) {
5524 /* Select filter policy to accept all advertising */
5525 return 0x00;
5526 }
5527
66d8e837
MH
5528 if (hci_find_irk_by_addr(hdev, &params->addr,
5529 params->addr_type)) {
5530 /* White list can not be used with RPAs */
5531 return 0x00;
5532 }
5533
8540f6c0
MH
5534 white_list_entries++;
5535 add_to_white_list(req, params);
5536 }
5537
5538 /* After adding all new pending connections, walk through
5539 * the list of pending reports and also add these to the
5540 * white list if there is still space.
5541 */
5542 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5543 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5544 &params->addr, params->addr_type))
5545 continue;
5546
5547 if (white_list_entries >= hdev->le_white_list_size) {
5548 /* Select filter policy to accept all advertising */
5549 return 0x00;
5550 }
5551
66d8e837
MH
5552 if (hci_find_irk_by_addr(hdev, &params->addr,
5553 params->addr_type)) {
5554 /* White list can not be used with RPAs */
5555 return 0x00;
5556 }
5557
8540f6c0
MH
5558 white_list_entries++;
5559 add_to_white_list(req, params);
5560 }
5561
5562 /* Select filter policy to use white list */
5563 return 0x01;
5564}
5565
8ef30fd3
AG
5566void hci_req_add_le_passive_scan(struct hci_request *req)
5567{
5568 struct hci_cp_le_set_scan_param param_cp;
5569 struct hci_cp_le_set_scan_enable enable_cp;
5570 struct hci_dev *hdev = req->hdev;
5571 u8 own_addr_type;
8540f6c0 5572 u8 filter_policy;
8ef30fd3 5573
6ab535a7
MH
5574 /* Set require_privacy to false since no SCAN_REQ is sent
5575 * during passive scanning. Not using an unresolvable address
5576 * here is important so that peer devices using direct
5577 * advertising with our address will be correctly reported
5578 * by the controller.
8ef30fd3 5579 */
6ab535a7 5580 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5581 return;
5582
8540f6c0
MH
5583 /* Adding or removing entries from the white list must
5584 * happen before enabling scanning. The controller does
5585 * not allow white list modification while scanning.
5586 */
5587 filter_policy = update_white_list(req);
5588
8ef30fd3
AG
5589 memset(&param_cp, 0, sizeof(param_cp));
5590 param_cp.type = LE_SCAN_PASSIVE;
5591 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5592 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5593 param_cp.own_address_type = own_addr_type;
8540f6c0 5594 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5595 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5596 &param_cp);
5597
5598 memset(&enable_cp, 0, sizeof(enable_cp));
5599 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5600 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5601 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5602 &enable_cp);
5603}
5604
a4790dbd
AG
5605static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5606{
5607 if (status)
5608 BT_DBG("HCI request failed to update background scanning: "
5609 "status 0x%2.2x", status);
5610}
5611
5612/* This function controls the background scanning based on hdev->pend_le_conns
5613 * list. If there are pending LE connections we start the background scanning,
5614 * otherwise we stop it.
5615 *
5616 * This function requires the caller holds hdev->lock.
5617 */
5618void hci_update_background_scan(struct hci_dev *hdev)
5619{
a4790dbd
AG
5620 struct hci_request req;
5621 struct hci_conn *conn;
5622 int err;
5623
c20c02d5
MH
5624 if (!test_bit(HCI_UP, &hdev->flags) ||
5625 test_bit(HCI_INIT, &hdev->flags) ||
5626 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5627 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5628 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5629 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5630 return;
5631
a70f4b5f
JH
5632 /* No point in doing scanning if LE support hasn't been enabled */
5633 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5634 return;
5635
ae23ada4
JH
5636 /* If discovery is active don't interfere with it */
5637 if (hdev->discovery.state != DISCOVERY_STOPPED)
5638 return;
5639
a4790dbd
AG
5640 hci_req_init(&req, hdev);
5641
d1d588c1 5642 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5643 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5644 /* If there is no pending LE connections or devices
5645 * to be scanned for, we should stop the background
5646 * scanning.
a4790dbd
AG
5647 */
5648
5649 /* If controller is not scanning we are done. */
5650 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5651 return;
5652
5653 hci_req_add_le_scan_disable(&req);
5654
5655 BT_DBG("%s stopping background scanning", hdev->name);
5656 } else {
a4790dbd
AG
5657 /* If there is at least one pending LE connection, we should
5658 * keep the background scan running.
5659 */
5660
a4790dbd
AG
5661 /* If controller is connecting, we should not start scanning
5662 * since some controllers are not able to scan and connect at
5663 * the same time.
5664 */
5665 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5666 if (conn)
5667 return;
5668
4340a124
AG
5669 /* If controller is currently scanning, we stop it to ensure we
5670 * don't miss any advertising (due to duplicates filter).
5671 */
5672 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5673 hci_req_add_le_scan_disable(&req);
5674
8ef30fd3 5675 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5676
5677 BT_DBG("%s starting background scanning", hdev->name);
5678 }
5679
5680 err = hci_req_run(&req, update_background_scan_complete);
5681 if (err)
5682 BT_ERR("Failed to run HCI request: err %d", err);
5683}
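
/* Usage sketch, illustrative only: as the comment above states, the caller
 * must hold hdev->lock, so a hypothetical call site would look like this:
 */
static void example_update_scan_locked(struct hci_dev *hdev)
{
        hci_dev_lock(hdev);
        hci_update_background_scan(hdev);
        hci_dev_unlock(hdev);
}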

/* Return true if at least one device on the BR/EDR whitelist is not
 * currently connected (or not yet fully established), meaning page scan
 * must stay enabled so such a device can reconnect.
 */
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

/* Enable page scan when the device is connectable or when any whitelisted
 * device is disconnected; otherwise disable it. The scan setting is queued
 * on @req when one is given, or sent directly when @req is NULL.
 */
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
        u8 scan;

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
                scan |= SCAN_INQUIRY;

        if (req)
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        else
                hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
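
/* Usage sketch, illustrative only: hci_update_page_scan() supports two call
 * paths. With a request, the scan write is batched with other commands; with
 * NULL, it is sent immediately. The wrapper function name is hypothetical.
 */
static void example_update_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        /* Batched: queue the scan write alongside other commands */
        hci_req_init(&req, hdev);
        hci_update_page_scan(hdev, &req);
        hci_req_run(&req, NULL);

        /* Immediate: send the command on its own */
        hci_update_page_scan(hdev, NULL);
}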