Bluetooth: Convert LTK list to RCU
net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
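
/* Usage sketch (assumption: debugfs is mounted at /sys/kernel/debug and the
 * controller is registered as hci0; the file itself is created from
 * __hci_init() below):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' to a powered-up controller enters Device Under Test mode via
 * HCI_OP_ENABLE_DUT_MODE; writing 'N' leaves it again by resetting the
 * controller.
 */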

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
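
/* Note that each sniff setter validates against the opposite bound, so the
 * pair can only ever hold min <= max; the val % 2 checks additionally
 * restrict both intervals (kept in 0.625 ms baseband slots) to even values.
 */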

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
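
/* The long term key list is the one list here that is traversed under RCU
 * rather than under hci_dev_lock (this is what converting the LTK list to
 * RCU buys): readers like the show function below only take
 * rcu_read_lock(), so LTK lookups no longer contend for the hdev lock.
 */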

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
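
/* LE connection intervals are carried in units of 1.25 ms, so the accepted
 * range 0x0006-0x0c80 above corresponds to 7.5 ms up to 4 s.
 */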

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
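
/* 0x01f3 (499 connection events) is the largest slave latency value the
 * Link Layer specification permits.
 */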

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
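
/* The supervision timeout is expressed in units of 10 ms, so the range
 * 0x000a-0x0c80 maps to 100 ms up to 32 s.
 */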

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
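
/* The map is a bitmask of the three LE advertising channels: bit 0 is
 * channel 37, bit 1 channel 38 and bit 2 channel 39. At least one channel
 * must stay enabled, hence the 0x01-0x07 bounds above.
 */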

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
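
/* Advertising intervals are expressed in units of 0.625 ms, so the
 * accepted range 0x0020-0x4000 corresponds to 20 ms up to 10.24 s.
 */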

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open = device_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
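
/* Typical caller pattern (a sketch; dut_mode_write() above is a live
 * example):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * The returned skb carries the command complete parameters and must be
 * freed by the caller.
 */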

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
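
/* The returned value feeds HCI_OP_WRITE_INQUIRY_MODE below: 0x00 selects
 * standard inquiry results, 0x01 inquiry results with RSSI and 0x02
 * extended inquiry results. The hard-coded manufacturer/revision checks
 * appear to cover controllers that handle RSSI inquiry without advertising
 * the corresponding feature bit.
 */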

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
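
/* Controller bring-up runs as up to four synchronous request stages:
 * init1 resets the controller and reads its basic capabilities, init2
 * acts on the reported BR/EDR and/or LE features, init3 configures event
 * masks and link policy, and init4 enables optional features such as
 * Secure Connections. AMP controllers stop after the first stage.
 */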
1746
2177bab5
JH
1747static int __hci_init(struct hci_dev *hdev)
1748{
1749 int err;
1750
1751 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1752 if (err < 0)
1753 return err;
1754
4b4148e9
MH
1755 /* The Device Under Test (DUT) mode is special and available for
1756 * all controller types. So just create it early on.
1757 */
1758 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1759 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1760 &dut_mode_fops);
1761 }
1762
2177bab5
JH
1763 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1764 * BR/EDR/LE type controllers. AMP controllers only need the
1765 * first stage init.
1766 */
1767 if (hdev->dev_type != HCI_BREDR)
1768 return 0;
1769
1770 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1771 if (err < 0)
1772 return err;
1773
5d4e7e8d
JH
1774 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1775 if (err < 0)
1776 return err;
1777
baf27f6e
MH
1778 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1779 if (err < 0)
1780 return err;
1781
1782 /* Only create debugfs entries during the initial setup
1783 * phase and not every time the controller gets powered on.
1784 */
1785 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1786 return 0;
1787
dfb826a8
MH
1788 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1789 &features_fops);
ceeb3bc0
MH
1790 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1791 &hdev->manufacturer);
1792 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1793 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
40f4938a
MH
1794 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1795 &device_list_fops);
70afe0b8
MH
1796 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1797 &blacklist_fops);
47219839
MH
1798 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1799
31ad1691
AK
1800 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1801 &conn_info_min_age_fops);
1802 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1803 &conn_info_max_age_fops);
1804
baf27f6e
MH
1805 if (lmp_bredr_capable(hdev)) {
1806 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1807 hdev, &inquiry_cache_fops);
02d08d15
MH
1808 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1809 hdev, &link_keys_fops);
babdbb3c
MH
1810 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1811 hdev, &dev_class_fops);
041000b9
MH
1812 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1813 hdev, &voice_setting_fops);
baf27f6e
MH
1814 }
1815
06f5b778 1816 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1817 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1818 hdev, &auto_accept_delay_fops);
5afeac14
MH
1819 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1820 hdev, &force_sc_support_fops);
134c2a89
MH
1821 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1822 hdev, &sc_only_mode_fops);
06f5b778 1823 }
ebd1e33b 1824
2bfa3531
MH
1825 if (lmp_sniff_capable(hdev)) {
1826 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1827 hdev, &idle_timeout_fops);
1828 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1829 hdev, &sniff_min_interval_fops);
1830 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1831 hdev, &sniff_max_interval_fops);
1832 }
1833
d0f729b8 1834 if (lmp_le_capable(hdev)) {
ac345813
MH
1835 debugfs_create_file("identity", 0400, hdev->debugfs,
1836 hdev, &identity_fops);
1837 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1838 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1839 debugfs_create_file("random_address", 0444, hdev->debugfs,
1840 hdev, &random_address_fops);
b32bba6c
MH
1841 debugfs_create_file("static_address", 0444, hdev->debugfs,
1842 hdev, &static_address_fops);
1843
1844 /* For controllers with a public address, provide a debug
1845 * option to force the usage of the configured static
1846 * address. By default the public address is used.
1847 */
1848 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1849 debugfs_create_file("force_static_address", 0644,
1850 hdev->debugfs, hdev,
1851 &force_static_address_fops);
1852
d0f729b8
MH
1853 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1854 &hdev->le_white_list_size);
d2ab0ac1
MH
1855 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1856 &white_list_fops);
3698d704
MH
1857 debugfs_create_file("identity_resolving_keys", 0400,
1858 hdev->debugfs, hdev,
1859 &identity_resolving_keys_fops);
8f8625cd
MH
1860 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1861 hdev, &long_term_keys_fops);
4e70c7e7
MH
1862 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1863 hdev, &conn_min_interval_fops);
1864 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1865 hdev, &conn_max_interval_fops);
816a93d1
MH
1866 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1867 hdev, &conn_latency_fops);
f1649577
MH
1868 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1869 hdev, &supervision_timeout_fops);
3f959d46
MH
1870 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1871 hdev, &adv_channel_map_fops);
729a1051
GL
1872 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1873 hdev, &adv_min_interval_fops);
1874 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1875 hdev, &adv_max_interval_fops);
b9a7a61e
LR
1876 debugfs_create_u16("discov_interleaved_timeout", 0644,
1877 hdev->debugfs,
1878 &hdev->discov_interleaved_timeout);
54506918 1879
711eafe3 1880 smp_register(hdev);
d0f729b8 1881 }
e7b8fc92 1882
baf27f6e 1883 return 0;
2177bab5
JH
1884}
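
/* A hypothetical userspace sketch (not part of this file) of reading one
 * of the debugfs entries created above. It assumes debugfs is mounted at
 * /sys/kernel/debug and that an hci0 instance exists; the example_* name
 * is illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_read_conn_min_interval(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/bluetooth/hci0/conn_min_interval",
		  O_RDONLY);
	if (fd < 0)
		return -1;

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("conn_min_interval: %s", buf);
	}

	close(fd);
	return n > 0 ? 0 : -1;
}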
1885
0ebca7d6
MH
1886static void hci_init0_req(struct hci_request *req, unsigned long opt)
1887{
1888 struct hci_dev *hdev = req->hdev;
1889
1890 BT_DBG("%s %ld", hdev->name, opt);
1891
1892 /* Reset */
1893 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1894 hci_reset_req(req, 0);
1895
1896 /* Read Local Version */
1897 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1898
1899 /* Read BD Address */
1900 if (hdev->set_bdaddr)
1901 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1902}
1903
1904static int __hci_unconf_init(struct hci_dev *hdev)
1905{
1906 int err;
1907
cc78b44b
MH
1908 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1909 return 0;
1910
0ebca7d6
MH
1911 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1912 if (err < 0)
1913 return err;
1914
1915 return 0;
1916}
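
/* A minimal sketch of the request-callback pattern used by hci_init0_req()
 * and __hci_unconf_init() above: a callback queues commands with
 * hci_req_add() and is run synchronously via __hci_req_sync(). The
 * example_* names are illustrative; the opcode was picked arbitrarily.
 */
static void example_read_name_req(struct hci_request *req, unsigned long opt)
{
	/* Queue one command: opcode, parameter length, parameter data */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

static int example_read_name(struct hci_dev *hdev)
{
	/* Blocks until the request completes or HCI_INIT_TIMEOUT expires */
	return __hci_req_sync(hdev, example_read_name_req, 0,
			      HCI_INIT_TIMEOUT);
}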
1917
42c6b129 1918static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1919{
1920 __u8 scan = opt;
1921
42c6b129 1922 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1923
1924 /* Inquiry and Page scans */
42c6b129 1925 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1926}
1927
42c6b129 1928static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1929{
1930 __u8 auth = opt;
1931
42c6b129 1932 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1933
1934 /* Authentication */
42c6b129 1935 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1936}
1937
42c6b129 1938static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1939{
1940 __u8 encrypt = opt;
1941
42c6b129 1942 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1943
e4e8e37c 1944 /* Encryption */
42c6b129 1945 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1946}
1947
42c6b129 1948static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1949{
1950 __le16 policy = cpu_to_le16(opt);
1951
42c6b129 1952 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1953
1954 /* Default link policy */
42c6b129 1955 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1956}
1957
8e87d142 1958/* Get HCI device by index.
1da177e4
LT
1959 * Device is held on return. */
1960struct hci_dev *hci_dev_get(int index)
1961{
8035ded4 1962 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1963
1964 BT_DBG("%d", index);
1965
1966 if (index < 0)
1967 return NULL;
1968
1969 read_lock(&hci_dev_list_lock);
8035ded4 1970 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1971 if (d->id == index) {
1972 hdev = hci_dev_hold(d);
1973 break;
1974 }
1975 }
1976 read_unlock(&hci_dev_list_lock);
1977 return hdev;
1978}
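
/* Usage sketch for the lookup above: hci_dev_get() returns a held
 * reference that must be balanced with hci_dev_put(), the same pattern
 * the ioctl helpers below follow. The example_* name is illustrative.
 */
static int example_with_dev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("found %s", hdev->name);

	hci_dev_put(hdev);
	return 0;
}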
1da177e4
LT
1979
1980/* ---- Inquiry support ---- */
ff9ef578 1981
30dc78e1
JH
1982bool hci_discovery_active(struct hci_dev *hdev)
1983{
1984 struct discovery_state *discov = &hdev->discovery;
1985
6fbe195d 1986 switch (discov->state) {
343f935b 1987 case DISCOVERY_FINDING:
6fbe195d 1988 case DISCOVERY_RESOLVING:
30dc78e1
JH
1989 return true;
1990
6fbe195d
AG
1991 default:
1992 return false;
1993 }
30dc78e1
JH
1994}
1995
ff9ef578
JH
1996void hci_discovery_set_state(struct hci_dev *hdev, int state)
1997{
bb3e0a33
JH
1998 int old_state = hdev->discovery.state;
1999
ff9ef578
JH
2000 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2001
bb3e0a33 2002 if (old_state == state)
ff9ef578
JH
2003 return;
2004
bb3e0a33
JH
2005 hdev->discovery.state = state;
2006
ff9ef578
JH
2007 switch (state) {
2008 case DISCOVERY_STOPPED:
c54c3860
AG
2009 hci_update_background_scan(hdev);
2010
bb3e0a33 2011 if (old_state != DISCOVERY_STARTING)
7b99b659 2012 mgmt_discovering(hdev, 0);
ff9ef578
JH
2013 break;
2014 case DISCOVERY_STARTING:
2015 break;
343f935b 2016 case DISCOVERY_FINDING:
ff9ef578
JH
2017 mgmt_discovering(hdev, 1);
2018 break;
30dc78e1
JH
2019 case DISCOVERY_RESOLVING:
2020 break;
ff9ef578
JH
2021 case DISCOVERY_STOPPING:
2022 break;
2023 }
ff9ef578
JH
2024}
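
/* For reference, the usual transition chain driven by mgmt and the event
 * handlers is STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING
 * -> STOPPED; background scanning is re-evaluated whenever the state
 * returns to STOPPED.
 */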
2025
1f9b9a5d 2026void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2027{
30883512 2028 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2029 struct inquiry_entry *p, *n;
1da177e4 2030
561aafbc
JH
2031 list_for_each_entry_safe(p, n, &cache->all, all) {
2032 list_del(&p->all);
b57c1a56 2033 kfree(p);
1da177e4 2034 }
561aafbc
JH
2035
2036 INIT_LIST_HEAD(&cache->unknown);
2037 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2038}
2039
a8c5fb1a
GP
2040struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2041 bdaddr_t *bdaddr)
1da177e4 2042{
30883512 2043 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2044 struct inquiry_entry *e;
2045
6ed93dc6 2046 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2047
561aafbc
JH
2048 list_for_each_entry(e, &cache->all, all) {
2049 if (!bacmp(&e->data.bdaddr, bdaddr))
2050 return e;
2051 }
2052
2053 return NULL;
2054}
2055
2056struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2057 bdaddr_t *bdaddr)
561aafbc 2058{
30883512 2059 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2060 struct inquiry_entry *e;
2061
6ed93dc6 2062 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2063
2064 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2065 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2066 return e;
2067 }
2068
2069 return NULL;
1da177e4
LT
2070}
2071
30dc78e1 2072struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2073 bdaddr_t *bdaddr,
2074 int state)
30dc78e1
JH
2075{
2076 struct discovery_state *cache = &hdev->discovery;
2077 struct inquiry_entry *e;
2078
6ed93dc6 2079 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2080
2081 list_for_each_entry(e, &cache->resolve, list) {
2082 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2083 return e;
2084 if (!bacmp(&e->data.bdaddr, bdaddr))
2085 return e;
2086 }
2087
2088 return NULL;
2089}
2090
a3d4e20a 2091void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2092 struct inquiry_entry *ie)
a3d4e20a
JH
2093{
2094 struct discovery_state *cache = &hdev->discovery;
2095 struct list_head *pos = &cache->resolve;
2096 struct inquiry_entry *p;
2097
2098 list_del(&ie->list);
2099
2100 list_for_each_entry(p, &cache->resolve, list) {
2101 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2102 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2103 break;
2104 pos = &p->list;
2105 }
2106
2107 list_add(&ie->list, pos);
2108}
2109
af58925c
MH
2110u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2111 bool name_known)
1da177e4 2112{
30883512 2113 struct discovery_state *cache = &hdev->discovery;
70f23020 2114 struct inquiry_entry *ie;
af58925c 2115 u32 flags = 0;
1da177e4 2116
6ed93dc6 2117 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2118
2b2fec4d
SJ
2119 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2120
af58925c
MH
2121 if (!data->ssp_mode)
2122 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2123
70f23020 2124 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2125 if (ie) {
af58925c
MH
2126 if (!ie->data.ssp_mode)
2127 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2128
a3d4e20a 2129 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2130 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2131 ie->data.rssi = data->rssi;
2132 hci_inquiry_cache_update_resolve(hdev, ie);
2133 }
2134
561aafbc 2135 goto update;
a3d4e20a 2136 }
561aafbc
JH
2137
 2138 /* Entry not in the cache. Add a new one. */
27f70f3e 2139 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2140 if (!ie) {
2141 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2142 goto done;
2143 }
561aafbc
JH
2144
2145 list_add(&ie->all, &cache->all);
2146
2147 if (name_known) {
2148 ie->name_state = NAME_KNOWN;
2149 } else {
2150 ie->name_state = NAME_NOT_KNOWN;
2151 list_add(&ie->list, &cache->unknown);
2152 }
70f23020 2153
561aafbc
JH
2154update:
2155 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2156 ie->name_state != NAME_PENDING) {
561aafbc
JH
2157 ie->name_state = NAME_KNOWN;
2158 list_del(&ie->list);
1da177e4
LT
2159 }
2160
70f23020
AE
2161 memcpy(&ie->data, data, sizeof(*data));
2162 ie->timestamp = jiffies;
1da177e4 2163 cache->timestamp = jiffies;
3175405b
JH
2164
2165 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2166 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2167
af58925c
MH
2168done:
2169 return flags;
1da177e4
LT
2170}
2171
2172static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2173{
30883512 2174 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2175 struct inquiry_info *info = (struct inquiry_info *) buf;
2176 struct inquiry_entry *e;
2177 int copied = 0;
2178
561aafbc 2179 list_for_each_entry(e, &cache->all, all) {
1da177e4 2180 struct inquiry_data *data = &e->data;
b57c1a56
JH
2181
2182 if (copied >= num)
2183 break;
2184
1da177e4
LT
2185 bacpy(&info->bdaddr, &data->bdaddr);
2186 info->pscan_rep_mode = data->pscan_rep_mode;
2187 info->pscan_period_mode = data->pscan_period_mode;
2188 info->pscan_mode = data->pscan_mode;
2189 memcpy(info->dev_class, data->dev_class, 3);
2190 info->clock_offset = data->clock_offset;
b57c1a56 2191
1da177e4 2192 info++;
b57c1a56 2193 copied++;
1da177e4
LT
2194 }
2195
2196 BT_DBG("cache %p, copied %d", cache, copied);
2197 return copied;
2198}
2199
42c6b129 2200static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2201{
2202 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2203 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2204 struct hci_cp_inquiry cp;
2205
2206 BT_DBG("%s", hdev->name);
2207
2208 if (test_bit(HCI_INQUIRY, &hdev->flags))
2209 return;
2210
2211 /* Start Inquiry */
2212 memcpy(&cp.lap, &ir->lap, 3);
2213 cp.length = ir->length;
2214 cp.num_rsp = ir->num_rsp;
42c6b129 2215 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2216}
2217
2218int hci_inquiry(void __user *arg)
2219{
2220 __u8 __user *ptr = arg;
2221 struct hci_inquiry_req ir;
2222 struct hci_dev *hdev;
2223 int err = 0, do_inquiry = 0, max_rsp;
2224 long timeo;
2225 __u8 *buf;
2226
2227 if (copy_from_user(&ir, ptr, sizeof(ir)))
2228 return -EFAULT;
2229
5a08ecce
AE
2230 hdev = hci_dev_get(ir.dev_id);
2231 if (!hdev)
1da177e4
LT
2232 return -ENODEV;
2233
0736cfa8
MH
2234 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2235 err = -EBUSY;
2236 goto done;
2237 }
2238
4a964404 2239 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2240 err = -EOPNOTSUPP;
2241 goto done;
2242 }
2243
5b69bef5
MH
2244 if (hdev->dev_type != HCI_BREDR) {
2245 err = -EOPNOTSUPP;
2246 goto done;
2247 }
2248
56f87901
JH
2249 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2250 err = -EOPNOTSUPP;
2251 goto done;
2252 }
2253
09fd0de5 2254 hci_dev_lock(hdev);
8e87d142 2255 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2256 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2257 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2258 do_inquiry = 1;
2259 }
09fd0de5 2260 hci_dev_unlock(hdev);
1da177e4 2261
04837f64 2262 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2263
2264 if (do_inquiry) {
01178cd4
JH
2265 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2266 timeo);
70f23020
AE
2267 if (err < 0)
2268 goto done;
3e13fa1e
AG
2269
 2270 /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag is
2271 * cleared). If it is interrupted by a signal, return -EINTR.
2272 */
74316201 2273 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2274 TASK_INTERRUPTIBLE))
2275 return -EINTR;
70f23020 2276 }
1da177e4 2277
8fc9ced3
GP
 2278 /* For an unlimited number of responses we will use a buffer with
 2279 * 255 entries
2280 */
1da177e4
LT
2281 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2282
 2283 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
 2284 * copy it to user space.
2285 */
01df8c31 2286 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2287 if (!buf) {
1da177e4
LT
2288 err = -ENOMEM;
2289 goto done;
2290 }
2291
09fd0de5 2292 hci_dev_lock(hdev);
1da177e4 2293 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2294 hci_dev_unlock(hdev);
1da177e4
LT
2295
2296 BT_DBG("num_rsp %d", ir.num_rsp);
2297
2298 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2299 ptr += sizeof(ir);
2300 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2301 ir.num_rsp))
1da177e4 2302 err = -EFAULT;
8e87d142 2303 } else
1da177e4
LT
2304 err = -EFAULT;
2305
2306 kfree(buf);
2307
2308done:
2309 hci_dev_put(hdev);
2310 return err;
2311}
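
/* A hypothetical userspace counterpart (not part of this file) driving
 * the HCIINQUIRY ioctl that lands in hci_inquiry() above. The buffer
 * layout (request header followed by inquiry_info entries) mirrors the
 * copy_to_user() calls; headers and constants come from the BlueZ
 * library, and the example_* name is illustrative.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		inquiry_info info[8];
	} *req;
	int dd, err;

	dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (dd < 0)
		return -1;

	req = calloc(1, sizeof(*req));
	if (!req) {
		close(dd);
		return -1;
	}

	req->ir.dev_id = dev_id;
	req->ir.flags = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	req->ir.num_rsp = 8;
	req->ir.length = 8;			/* 8 * 1.28s inquiry window */
	req->ir.lap[0] = 0x33;			/* GIAC 0x9e8b33 */
	req->ir.lap[1] = 0x8b;
	req->ir.lap[2] = 0x9e;

	err = ioctl(dd, HCIINQUIRY, (unsigned long) req);

	free(req);
	close(dd);
	return err;
}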
2312
cbed0ca1 2313static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2314{
1da177e4
LT
2315 int ret = 0;
2316
1da177e4
LT
2317 BT_DBG("%s %p", hdev->name, hdev);
2318
2319 hci_req_lock(hdev);
2320
94324962
JH
2321 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2322 ret = -ENODEV;
2323 goto done;
2324 }
2325
d603b76b
MH
2326 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2327 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2328 /* Check for rfkill but allow the HCI setup stage to
2329 * proceed (which in itself doesn't cause any RF activity).
2330 */
2331 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2332 ret = -ERFKILL;
2333 goto done;
2334 }
2335
 2336 /* Check for a valid public address or a configured static
 2337 * random address, but let the HCI setup proceed to
2338 * be able to determine if there is a public address
2339 * or not.
2340 *
c6beca0e
MH
2341 * In case of user channel usage, it is not important
2342 * if a public address or static random address is
2343 * available.
2344 *
a5c8f270
MH
2345 * This check is only valid for BR/EDR controllers
2346 * since AMP controllers do not have an address.
2347 */
c6beca0e
MH
2348 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2349 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2350 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2351 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2352 ret = -EADDRNOTAVAIL;
2353 goto done;
2354 }
611b30f7
MH
2355 }
2356
1da177e4
LT
2357 if (test_bit(HCI_UP, &hdev->flags)) {
2358 ret = -EALREADY;
2359 goto done;
2360 }
2361
1da177e4
LT
2362 if (hdev->open(hdev)) {
2363 ret = -EIO;
2364 goto done;
2365 }
2366
f41c70c4
MH
2367 atomic_set(&hdev->cmd_cnt, 1);
2368 set_bit(HCI_INIT, &hdev->flags);
2369
af202f84
MH
2370 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2371 if (hdev->setup)
2372 ret = hdev->setup(hdev);
f41c70c4 2373
af202f84
MH
2374 /* The transport driver can set these quirks before
2375 * creating the HCI device or in its setup callback.
2376 *
2377 * In case any of them is set, the controller has to
2378 * start up as unconfigured.
2379 */
eb1904f4
MH
2380 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2381 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2382 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2383
0ebca7d6
MH
2384 /* For an unconfigured controller it is required to
2385 * read at least the version information provided by
2386 * the Read Local Version Information command.
2387 *
2388 * If the set_bdaddr driver callback is provided, then
2389 * also the original Bluetooth public device address
2390 * will be read using the Read BD Address command.
2391 */
2392 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2393 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2394 }
2395
9713c17b
MH
2396 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2397 /* If public address change is configured, ensure that
2398 * the address gets programmed. If the driver does not
2399 * support changing the public address, fail the power
2400 * on procedure.
2401 */
2402 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2403 hdev->set_bdaddr)
24c457e2
MH
2404 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2405 else
2406 ret = -EADDRNOTAVAIL;
2407 }
2408
f41c70c4 2409 if (!ret) {
4a964404 2410 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2411 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2412 ret = __hci_init(hdev);
1da177e4
LT
2413 }
2414
f41c70c4
MH
2415 clear_bit(HCI_INIT, &hdev->flags);
2416
1da177e4
LT
2417 if (!ret) {
2418 hci_dev_hold(hdev);
d6bfd59c 2419 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2420 set_bit(HCI_UP, &hdev->flags);
2421 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2422 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2423 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2424 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2425 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2426 hdev->dev_type == HCI_BREDR) {
09fd0de5 2427 hci_dev_lock(hdev);
744cf19e 2428 mgmt_powered(hdev, 1);
09fd0de5 2429 hci_dev_unlock(hdev);
56e5cb86 2430 }
8e87d142 2431 } else {
1da177e4 2432 /* Init failed, cleanup */
3eff45ea 2433 flush_work(&hdev->tx_work);
c347b765 2434 flush_work(&hdev->cmd_work);
b78752cc 2435 flush_work(&hdev->rx_work);
1da177e4
LT
2436
2437 skb_queue_purge(&hdev->cmd_q);
2438 skb_queue_purge(&hdev->rx_q);
2439
2440 if (hdev->flush)
2441 hdev->flush(hdev);
2442
2443 if (hdev->sent_cmd) {
2444 kfree_skb(hdev->sent_cmd);
2445 hdev->sent_cmd = NULL;
2446 }
2447
2448 hdev->close(hdev);
fee746b0 2449 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2450 }
2451
2452done:
2453 hci_req_unlock(hdev);
1da177e4
LT
2454 return ret;
2455}
2456
cbed0ca1
JH
2457/* ---- HCI ioctl helpers ---- */
2458
2459int hci_dev_open(__u16 dev)
2460{
2461 struct hci_dev *hdev;
2462 int err;
2463
2464 hdev = hci_dev_get(dev);
2465 if (!hdev)
2466 return -ENODEV;
2467
4a964404 2468 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2469 * up as user channel. Trying to bring them up as normal devices
 2470 * will result in a failure. Only user channel operation is
2471 * possible.
2472 *
2473 * When this function is called for a user channel, the flag
2474 * HCI_USER_CHANNEL will be set first before attempting to
2475 * open the device.
2476 */
4a964404 2477 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2478 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479 err = -EOPNOTSUPP;
2480 goto done;
2481 }
2482
e1d08f40
JH
2483 /* We need to ensure that no other power on/off work is pending
2484 * before proceeding to call hci_dev_do_open. This is
2485 * particularly important if the setup procedure has not yet
2486 * completed.
2487 */
2488 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2489 cancel_delayed_work(&hdev->power_off);
2490
a5c8f270
MH
2491 /* After this call it is guaranteed that the setup procedure
2492 * has finished. This means that error conditions like RFKILL
2493 * or no valid public or static random address apply.
2494 */
e1d08f40
JH
2495 flush_workqueue(hdev->req_workqueue);
2496
12aa4f0a 2497 /* For controllers not using the management interface and that
b6ae8457 2498 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2499 * so that pairing works for them. Once the management interface
2500 * is in use this bit will be cleared again and userspace has
2501 * to explicitly enable it.
2502 */
2503 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2504 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2505 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2506
cbed0ca1
JH
2507 err = hci_dev_do_open(hdev);
2508
fee746b0 2509done:
cbed0ca1 2510 hci_dev_put(hdev);
cbed0ca1
JH
2511 return err;
2512}
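
/* A hypothetical userspace sketch (not part of this file): hci_dev_open()
 * above backs the HCIDEVUP ioctl on a raw HCI socket. The example_* name
 * is illustrative.
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_dev_up(int dev_id)
{
	int dd, err;

	dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (dd < 0)
		return -1;

	err = ioctl(dd, HCIDEVUP, dev_id);	/* ends up in hci_dev_open() */

	close(dd);
	return err;
}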
2513
d7347f3c
JH
2514/* This function requires the caller holds hdev->lock */
2515static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2516{
2517 struct hci_conn_params *p;
2518
f161dd41
JH
2519 list_for_each_entry(p, &hdev->le_conn_params, list) {
2520 if (p->conn) {
2521 hci_conn_drop(p->conn);
f8aaf9b6 2522 hci_conn_put(p->conn);
f161dd41
JH
2523 p->conn = NULL;
2524 }
d7347f3c 2525 list_del_init(&p->action);
f161dd41 2526 }
d7347f3c
JH
2527
2528 BT_DBG("All LE pending actions cleared");
2529}
2530
1da177e4
LT
2531static int hci_dev_do_close(struct hci_dev *hdev)
2532{
2533 BT_DBG("%s %p", hdev->name, hdev);
2534
78c04c0b
VCG
2535 cancel_delayed_work(&hdev->power_off);
2536
1da177e4
LT
2537 hci_req_cancel(hdev, ENODEV);
2538 hci_req_lock(hdev);
2539
2540 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2541 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2542 hci_req_unlock(hdev);
2543 return 0;
2544 }
2545
3eff45ea
GP
2546 /* Flush RX and TX works */
2547 flush_work(&hdev->tx_work);
b78752cc 2548 flush_work(&hdev->rx_work);
1da177e4 2549
16ab91ab 2550 if (hdev->discov_timeout > 0) {
e0f9309f 2551 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2552 hdev->discov_timeout = 0;
5e5282bb 2553 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2554 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2555 }
2556
a8b2d5c2 2557 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2558 cancel_delayed_work(&hdev->service_cache);
2559
7ba8b4be 2560 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2561
2562 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2563 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2564
09fd0de5 2565 hci_dev_lock(hdev);
1f9b9a5d 2566 hci_inquiry_cache_flush(hdev);
d7347f3c 2567 hci_pend_le_actions_clear(hdev);
f161dd41 2568 hci_conn_hash_flush(hdev);
09fd0de5 2569 hci_dev_unlock(hdev);
1da177e4
LT
2570
2571 hci_notify(hdev, HCI_DEV_DOWN);
2572
2573 if (hdev->flush)
2574 hdev->flush(hdev);
2575
2576 /* Reset device */
2577 skb_queue_purge(&hdev->cmd_q);
2578 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2579 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2580 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2581 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2582 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2583 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2584 clear_bit(HCI_INIT, &hdev->flags);
2585 }
2586
c347b765
GP
2587 /* flush cmd work */
2588 flush_work(&hdev->cmd_work);
1da177e4
LT
2589
2590 /* Drop queues */
2591 skb_queue_purge(&hdev->rx_q);
2592 skb_queue_purge(&hdev->cmd_q);
2593 skb_queue_purge(&hdev->raw_q);
2594
2595 /* Drop last sent command */
2596 if (hdev->sent_cmd) {
65cc2b49 2597 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2598 kfree_skb(hdev->sent_cmd);
2599 hdev->sent_cmd = NULL;
2600 }
2601
b6ddb638
JH
2602 kfree_skb(hdev->recv_evt);
2603 hdev->recv_evt = NULL;
2604
1da177e4
LT
2605 /* After this point our queues are empty
2606 * and no tasks are scheduled. */
2607 hdev->close(hdev);
2608
35b973c9 2609 /* Clear flags */
fee746b0 2610 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2611 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2612
93c311a0
MH
2613 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2614 if (hdev->dev_type == HCI_BREDR) {
2615 hci_dev_lock(hdev);
2616 mgmt_powered(hdev, 0);
2617 hci_dev_unlock(hdev);
2618 }
8ee56540 2619 }
5add6af8 2620
ced5c338 2621 /* Controller radio is available but is currently powered down */
536619e8 2622 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2623
e59fda8d 2624 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2625 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2626 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2627
1da177e4
LT
2628 hci_req_unlock(hdev);
2629
2630 hci_dev_put(hdev);
2631 return 0;
2632}
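
/* Teardown order above, in brief: flush the RX/TX work, cancel pending
 * timers, flush the inquiry cache, pending LE actions and connections
 * under hdev->lock, optionally issue an HCI Reset, flush the command
 * work, purge all queues, drop the last sent command, then call the
 * driver's close() and clear every non-persistent flag.
 */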
2633
2634int hci_dev_close(__u16 dev)
2635{
2636 struct hci_dev *hdev;
2637 int err;
2638
70f23020
AE
2639 hdev = hci_dev_get(dev);
2640 if (!hdev)
1da177e4 2641 return -ENODEV;
8ee56540 2642
0736cfa8
MH
2643 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2644 err = -EBUSY;
2645 goto done;
2646 }
2647
8ee56540
MH
2648 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2649 cancel_delayed_work(&hdev->power_off);
2650
1da177e4 2651 err = hci_dev_do_close(hdev);
8ee56540 2652
0736cfa8 2653done:
1da177e4
LT
2654 hci_dev_put(hdev);
2655 return err;
2656}
2657
2658int hci_dev_reset(__u16 dev)
2659{
2660 struct hci_dev *hdev;
2661 int ret = 0;
2662
70f23020
AE
2663 hdev = hci_dev_get(dev);
2664 if (!hdev)
1da177e4
LT
2665 return -ENODEV;
2666
2667 hci_req_lock(hdev);
1da177e4 2668
808a049e
MH
2669 if (!test_bit(HCI_UP, &hdev->flags)) {
2670 ret = -ENETDOWN;
1da177e4 2671 goto done;
808a049e 2672 }
1da177e4 2673
0736cfa8
MH
2674 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2675 ret = -EBUSY;
2676 goto done;
2677 }
2678
4a964404 2679 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2680 ret = -EOPNOTSUPP;
2681 goto done;
2682 }
2683
1da177e4
LT
2684 /* Drop queues */
2685 skb_queue_purge(&hdev->rx_q);
2686 skb_queue_purge(&hdev->cmd_q);
2687
09fd0de5 2688 hci_dev_lock(hdev);
1f9b9a5d 2689 hci_inquiry_cache_flush(hdev);
1da177e4 2690 hci_conn_hash_flush(hdev);
09fd0de5 2691 hci_dev_unlock(hdev);
1da177e4
LT
2692
2693 if (hdev->flush)
2694 hdev->flush(hdev);
2695
8e87d142 2696 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2697 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2698
fee746b0 2699 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2700
2701done:
1da177e4
LT
2702 hci_req_unlock(hdev);
2703 hci_dev_put(hdev);
2704 return ret;
2705}
2706
2707int hci_dev_reset_stat(__u16 dev)
2708{
2709 struct hci_dev *hdev;
2710 int ret = 0;
2711
70f23020
AE
2712 hdev = hci_dev_get(dev);
2713 if (!hdev)
1da177e4
LT
2714 return -ENODEV;
2715
0736cfa8
MH
2716 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2717 ret = -EBUSY;
2718 goto done;
2719 }
2720
4a964404 2721 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2722 ret = -EOPNOTSUPP;
2723 goto done;
2724 }
2725
1da177e4
LT
2726 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2727
0736cfa8 2728done:
1da177e4 2729 hci_dev_put(hdev);
1da177e4
LT
2730 return ret;
2731}
2732
123abc08
JH
2733static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2734{
bc6d2d04 2735 bool conn_changed, discov_changed;
123abc08
JH
2736
2737 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2738
2739 if ((scan & SCAN_PAGE))
2740 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2741 &hdev->dev_flags);
2742 else
2743 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2744 &hdev->dev_flags);
2745
bc6d2d04
JH
2746 if ((scan & SCAN_INQUIRY)) {
2747 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2748 &hdev->dev_flags);
2749 } else {
2750 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2751 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2752 &hdev->dev_flags);
2753 }
2754
123abc08
JH
2755 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2756 return;
2757
bc6d2d04
JH
2758 if (conn_changed || discov_changed) {
2759 /* In case this was disabled through mgmt */
2760 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2761
2762 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2763 mgmt_update_adv_data(hdev);
2764
123abc08 2765 mgmt_new_settings(hdev);
bc6d2d04 2766 }
123abc08
JH
2767}
2768
1da177e4
LT
2769int hci_dev_cmd(unsigned int cmd, void __user *arg)
2770{
2771 struct hci_dev *hdev;
2772 struct hci_dev_req dr;
2773 int err = 0;
2774
2775 if (copy_from_user(&dr, arg, sizeof(dr)))
2776 return -EFAULT;
2777
70f23020
AE
2778 hdev = hci_dev_get(dr.dev_id);
2779 if (!hdev)
1da177e4
LT
2780 return -ENODEV;
2781
0736cfa8
MH
2782 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2783 err = -EBUSY;
2784 goto done;
2785 }
2786
4a964404 2787 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2788 err = -EOPNOTSUPP;
2789 goto done;
2790 }
2791
5b69bef5
MH
2792 if (hdev->dev_type != HCI_BREDR) {
2793 err = -EOPNOTSUPP;
2794 goto done;
2795 }
2796
56f87901
JH
2797 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2798 err = -EOPNOTSUPP;
2799 goto done;
2800 }
2801
1da177e4
LT
2802 switch (cmd) {
2803 case HCISETAUTH:
01178cd4
JH
2804 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2805 HCI_INIT_TIMEOUT);
1da177e4
LT
2806 break;
2807
2808 case HCISETENCRYPT:
2809 if (!lmp_encrypt_capable(hdev)) {
2810 err = -EOPNOTSUPP;
2811 break;
2812 }
2813
2814 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2815 /* Auth must be enabled first */
01178cd4
JH
2816 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2817 HCI_INIT_TIMEOUT);
1da177e4
LT
2818 if (err)
2819 break;
2820 }
2821
01178cd4
JH
2822 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2823 HCI_INIT_TIMEOUT);
1da177e4
LT
2824 break;
2825
2826 case HCISETSCAN:
01178cd4
JH
2827 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2828 HCI_INIT_TIMEOUT);
91a668b0 2829
bc6d2d04
JH
2830 /* Ensure that the connectable and discoverable states
2831 * get correctly modified as this was a non-mgmt change.
91a668b0 2832 */
123abc08
JH
2833 if (!err)
2834 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2835 break;
2836
1da177e4 2837 case HCISETLINKPOL:
01178cd4
JH
2838 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2839 HCI_INIT_TIMEOUT);
1da177e4
LT
2840 break;
2841
2842 case HCISETLINKMODE:
e4e8e37c
MH
2843 hdev->link_mode = ((__u16) dr.dev_opt) &
2844 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2845 break;
2846
2847 case HCISETPTYPE:
2848 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2849 break;
2850
2851 case HCISETACLMTU:
e4e8e37c
MH
2852 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2853 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2854 break;
2855
2856 case HCISETSCOMTU:
e4e8e37c
MH
2857 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2858 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2859 break;
2860
2861 default:
2862 err = -EINVAL;
2863 break;
2864 }
e4e8e37c 2865
0736cfa8 2866done:
1da177e4
LT
2867 hci_dev_put(hdev);
2868 return err;
2869}
2870
2871int hci_get_dev_list(void __user *arg)
2872{
8035ded4 2873 struct hci_dev *hdev;
1da177e4
LT
2874 struct hci_dev_list_req *dl;
2875 struct hci_dev_req *dr;
1da177e4
LT
2876 int n = 0, size, err;
2877 __u16 dev_num;
2878
2879 if (get_user(dev_num, (__u16 __user *) arg))
2880 return -EFAULT;
2881
2882 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2883 return -EINVAL;
2884
2885 size = sizeof(*dl) + dev_num * sizeof(*dr);
2886
70f23020
AE
2887 dl = kzalloc(size, GFP_KERNEL);
2888 if (!dl)
1da177e4
LT
2889 return -ENOMEM;
2890
2891 dr = dl->dev_req;
2892
f20d09d5 2893 read_lock(&hci_dev_list_lock);
8035ded4 2894 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2895 unsigned long flags = hdev->flags;
c542a06c 2896
2e84d8db
MH
2897 /* When the auto-off is configured it means the transport
2898 * is running, but in that case still indicate that the
2899 * device is actually down.
2900 */
2901 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2902 flags &= ~BIT(HCI_UP);
c542a06c 2903
1da177e4 2904 (dr + n)->dev_id = hdev->id;
2e84d8db 2905 (dr + n)->dev_opt = flags;
c542a06c 2906
1da177e4
LT
2907 if (++n >= dev_num)
2908 break;
2909 }
f20d09d5 2910 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2911
2912 dl->dev_num = n;
2913 size = sizeof(*dl) + n * sizeof(*dr);
2914
2915 err = copy_to_user(arg, dl, size);
2916 kfree(dl);
2917
2918 return err ? -EFAULT : 0;
2919}
2920
2921int hci_get_dev_info(void __user *arg)
2922{
2923 struct hci_dev *hdev;
2924 struct hci_dev_info di;
2e84d8db 2925 unsigned long flags;
1da177e4
LT
2926 int err = 0;
2927
2928 if (copy_from_user(&di, arg, sizeof(di)))
2929 return -EFAULT;
2930
70f23020
AE
2931 hdev = hci_dev_get(di.dev_id);
2932 if (!hdev)
1da177e4
LT
2933 return -ENODEV;
2934
2e84d8db
MH
2935 /* When the auto-off is configured it means the transport
2936 * is running, but in that case still indicate that the
2937 * device is actually down.
2938 */
2939 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2940 flags = hdev->flags & ~BIT(HCI_UP);
2941 else
2942 flags = hdev->flags;
c542a06c 2943
1da177e4
LT
2944 strcpy(di.name, hdev->name);
2945 di.bdaddr = hdev->bdaddr;
60f2a3ed 2946 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2947 di.flags = flags;
1da177e4 2948 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2949 if (lmp_bredr_capable(hdev)) {
2950 di.acl_mtu = hdev->acl_mtu;
2951 di.acl_pkts = hdev->acl_pkts;
2952 di.sco_mtu = hdev->sco_mtu;
2953 di.sco_pkts = hdev->sco_pkts;
2954 } else {
2955 di.acl_mtu = hdev->le_mtu;
2956 di.acl_pkts = hdev->le_pkts;
2957 di.sco_mtu = 0;
2958 di.sco_pkts = 0;
2959 }
1da177e4
LT
2960 di.link_policy = hdev->link_policy;
2961 di.link_mode = hdev->link_mode;
2962
2963 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2964 memcpy(&di.features, &hdev->features, sizeof(di.features));
2965
2966 if (copy_to_user(arg, &di, sizeof(di)))
2967 err = -EFAULT;
2968
2969 hci_dev_put(hdev);
2970
2971 return err;
2972}
2973
2974/* ---- Interface to HCI drivers ---- */
2975
611b30f7
MH
2976static int hci_rfkill_set_block(void *data, bool blocked)
2977{
2978 struct hci_dev *hdev = data;
2979
2980 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2981
0736cfa8
MH
2982 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2983 return -EBUSY;
2984
5e130367
JH
2985 if (blocked) {
2986 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2987 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2988 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2989 hci_dev_do_close(hdev);
5e130367
JH
2990 } else {
2991 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2992 }
611b30f7
MH
2993
2994 return 0;
2995}
2996
2997static const struct rfkill_ops hci_rfkill_ops = {
2998 .set_block = hci_rfkill_set_block,
2999};
3000
ab81cbf9
JH
3001static void hci_power_on(struct work_struct *work)
3002{
3003 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3004 int err;
ab81cbf9
JH
3005
3006 BT_DBG("%s", hdev->name);
3007
cbed0ca1 3008 err = hci_dev_do_open(hdev);
96570ffc
JH
3009 if (err < 0) {
3010 mgmt_set_powered_failed(hdev, err);
ab81cbf9 3011 return;
96570ffc 3012 }
ab81cbf9 3013
a5c8f270
MH
3014 /* During the HCI setup phase, a few error conditions are
3015 * ignored and they need to be checked now. If they are still
3016 * valid, it is important to turn the device back off.
3017 */
3018 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3019 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3020 (hdev->dev_type == HCI_BREDR &&
3021 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3022 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3023 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3024 hci_dev_do_close(hdev);
3025 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3026 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3027 HCI_AUTO_OFF_TIMEOUT);
bf543036 3028 }
ab81cbf9 3029
fee746b0 3030 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3031 /* For unconfigured devices, set the HCI_RAW flag
3032 * so that userspace can easily identify them.
4a964404
MH
3033 */
3034 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3035 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3036
3037 /* For fully configured devices, this will send
3038 * the Index Added event. For unconfigured devices,
 3039 * it will send the Unconfigured Index Added event.
3040 *
3041 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 3042 * and no event will be sent.
3043 */
3044 mgmt_index_added(hdev);
d603b76b 3045 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3046 /* When the controller is now configured, then it
3047 * is important to clear the HCI_RAW flag.
3048 */
3049 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3050 clear_bit(HCI_RAW, &hdev->flags);
3051
d603b76b
MH
3052 /* Powering on the controller with HCI_CONFIG set only
3053 * happens with the transition from unconfigured to
3054 * configured. This will send the Index Added event.
3055 */
744cf19e 3056 mgmt_index_added(hdev);
fee746b0 3057 }
ab81cbf9
JH
3058}
3059
3060static void hci_power_off(struct work_struct *work)
3061{
3243553f 3062 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3063 power_off.work);
ab81cbf9
JH
3064
3065 BT_DBG("%s", hdev->name);
3066
8ee56540 3067 hci_dev_do_close(hdev);
ab81cbf9
JH
3068}
3069
16ab91ab
JH
3070static void hci_discov_off(struct work_struct *work)
3071{
3072 struct hci_dev *hdev;
16ab91ab
JH
3073
3074 hdev = container_of(work, struct hci_dev, discov_off.work);
3075
3076 BT_DBG("%s", hdev->name);
3077
d1967ff8 3078 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3079}
3080
35f7498a 3081void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3082{
4821002c 3083 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3084
4821002c
JH
3085 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3086 list_del(&uuid->list);
2aeb9a1a
JH
3087 kfree(uuid);
3088 }
2aeb9a1a
JH
3089}
3090
35f7498a 3091void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3092{
3093 struct list_head *p, *n;
3094
3095 list_for_each_safe(p, n, &hdev->link_keys) {
3096 struct link_key *key;
3097
3098 key = list_entry(p, struct link_key, list);
3099
3100 list_del(p);
3101 kfree(key);
3102 }
55ed8ca1
JH
3103}
3104
35f7498a 3105void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 3106{
970d0f1b 3107 struct smp_ltk *k;
b899efaf 3108
970d0f1b
JH
3109 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3110 list_del_rcu(&k->list);
3111 kfree_rcu(k, rcu);
b899efaf 3112 }
b899efaf
VCG
3113}
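
/* The long-term-key list is RCU-protected: the writer above unlinks
 * entries with list_del_rcu() and defers the free with kfree_rcu(),
 * while readers such as hci_find_ltk() below traverse the list inside
 * an rcu_read_lock() section. A generic reader-side sketch with
 * illustrative example_* types:
 */
struct example_entry {
	struct list_head list;
	struct rcu_head rcu;
	u16 id;
};

static struct example_entry *example_find(struct list_head *head, u16 id)
{
	struct example_entry *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		if (e->id == id) {
			rcu_read_unlock();
			return e;
		}
	}
	rcu_read_unlock();

	return NULL;
}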
3114
970c4e46
JH
3115void hci_smp_irks_clear(struct hci_dev *hdev)
3116{
3117 struct smp_irk *k, *tmp;
3118
3119 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3120 list_del(&k->list);
3121 kfree(k);
3122 }
3123}
3124
55ed8ca1
JH
3125struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3126{
8035ded4 3127 struct link_key *k;
55ed8ca1 3128
8035ded4 3129 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3130 if (bacmp(bdaddr, &k->bdaddr) == 0)
3131 return k;
55ed8ca1
JH
3132
3133 return NULL;
3134}
3135
745c0ce3 3136static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3137 u8 key_type, u8 old_key_type)
d25e28ab
JH
3138{
3139 /* Legacy key */
3140 if (key_type < 0x03)
745c0ce3 3141 return true;
d25e28ab
JH
3142
3143 /* Debug keys are insecure so don't store them persistently */
3144 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3145 return false;
d25e28ab
JH
3146
3147 /* Changed combination key and there's no previous one */
3148 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3149 return false;
d25e28ab
JH
3150
3151 /* Security mode 3 case */
3152 if (!conn)
745c0ce3 3153 return true;
d25e28ab
JH
3154
 3155 /* Neither local nor remote side requested no-bonding */
3156 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3157 return true;
d25e28ab
JH
3158
3159 /* Local side had dedicated bonding as requirement */
3160 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3161 return true;
d25e28ab
JH
3162
3163 /* Remote side had dedicated bonding as requirement */
3164 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3165 return true;
d25e28ab
JH
3166
3167 /* If none of the above criteria match, then don't store the key
3168 * persistently */
745c0ce3 3169 return false;
d25e28ab
JH
3170}
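
/* Summary of the checks above: legacy keys (type < 0x03) are always
 * stored; debug combination keys never are; a changed combination key
 * with no previous key is not stored; otherwise the key persists when
 * there is no connection context (security mode 3), when both sides
 * requested some form of bonding, or when either side used dedicated
 * bonding (auth requirement 0x02 or 0x03).
 */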
3171
e804d25d 3172static u8 ltk_role(u8 type)
98a0b845 3173{
e804d25d
JH
3174 if (type == SMP_LTK)
3175 return HCI_ROLE_MASTER;
98a0b845 3176
e804d25d 3177 return HCI_ROLE_SLAVE;
98a0b845
JH
3178}
3179
fe39c7b2 3180struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
e804d25d 3181 u8 role)
75d262c2 3182{
c9839a11 3183 struct smp_ltk *k;
75d262c2 3184
970d0f1b
JH
3185 rcu_read_lock();
3186 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
fe39c7b2 3187 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3188 continue;
3189
e804d25d 3190 if (ltk_role(k->type) != role)
98a0b845
JH
3191 continue;
3192
970d0f1b 3193 rcu_read_unlock();
c9839a11 3194 return k;
75d262c2 3195 }
970d0f1b 3196 rcu_read_unlock();
75d262c2
VCG
3197
3198 return NULL;
3199}
75d262c2 3200
c9839a11 3201struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3202 u8 addr_type, u8 role)
75d262c2 3203{
c9839a11 3204 struct smp_ltk *k;
75d262c2 3205
970d0f1b
JH
3206 rcu_read_lock();
3207 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
c9839a11 3208 if (addr_type == k->bdaddr_type &&
98a0b845 3209 bacmp(bdaddr, &k->bdaddr) == 0 &&
970d0f1b
JH
3210 ltk_role(k->type) == role) {
3211 rcu_read_unlock();
75d262c2 3212 return k;
970d0f1b
JH
3213 }
3214 }
3215 rcu_read_unlock();
75d262c2
VCG
3216
3217 return NULL;
3218}
75d262c2 3219
970c4e46
JH
3220struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3221{
3222 struct smp_irk *irk;
3223
3224 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3225 if (!bacmp(&irk->rpa, rpa))
3226 return irk;
3227 }
3228
3229 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
defce9e8 3230 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46
JH
3231 bacpy(&irk->rpa, rpa);
3232 return irk;
3233 }
3234 }
3235
3236 return NULL;
3237}
3238
3239struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 addr_type)
3241{
3242 struct smp_irk *irk;
3243
6cfc9988
JH
3244 /* Identity Address must be public or static random */
3245 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3246 return NULL;
3247
970c4e46
JH
3248 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3249 if (addr_type == irk->addr_type &&
3250 bacmp(bdaddr, &irk->bdaddr) == 0)
3251 return irk;
3252 }
3253
3254 return NULL;
3255}
3256
567fa2aa 3257struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3258 bdaddr_t *bdaddr, u8 *val, u8 type,
3259 u8 pin_len, bool *persistent)
55ed8ca1
JH
3260{
3261 struct link_key *key, *old_key;
745c0ce3 3262 u8 old_key_type;
55ed8ca1
JH
3263
3264 old_key = hci_find_link_key(hdev, bdaddr);
3265 if (old_key) {
3266 old_key_type = old_key->type;
3267 key = old_key;
3268 } else {
12adcf3a 3269 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3270 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3271 if (!key)
567fa2aa 3272 return NULL;
55ed8ca1
JH
3273 list_add(&key->list, &hdev->link_keys);
3274 }
3275
6ed93dc6 3276 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3277
d25e28ab
JH
3278 /* Some buggy controller combinations generate a changed
3279 * combination key for legacy pairing even when there's no
3280 * previous key */
3281 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3282 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3283 type = HCI_LK_COMBINATION;
655fe6ec
JH
3284 if (conn)
3285 conn->key_type = type;
3286 }
d25e28ab 3287
55ed8ca1 3288 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3289 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3290 key->pin_len = pin_len;
3291
b6020ba0 3292 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3293 key->type = old_key_type;
4748fed2
JH
3294 else
3295 key->type = type;
3296
7652ff6a
JH
3297 if (persistent)
3298 *persistent = hci_persistent_key(hdev, conn, type,
3299 old_key_type);
4df378a1 3300
567fa2aa 3301 return key;
55ed8ca1
JH
3302}
3303
ca9142b8 3304struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3305 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3306 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3307{
c9839a11 3308 struct smp_ltk *key, *old_key;
e804d25d 3309 u8 role = ltk_role(type);
75d262c2 3310
e804d25d 3311 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3312 if (old_key)
75d262c2 3313 key = old_key;
c9839a11 3314 else {
0a14ab41 3315 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3316 if (!key)
ca9142b8 3317 return NULL;
970d0f1b 3318 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3319 }
3320
75d262c2 3321 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3322 key->bdaddr_type = addr_type;
3323 memcpy(key->val, tk, sizeof(key->val));
3324 key->authenticated = authenticated;
3325 key->ediv = ediv;
fe39c7b2 3326 key->rand = rand;
c9839a11
VCG
3327 key->enc_size = enc_size;
3328 key->type = type;
75d262c2 3329
ca9142b8 3330 return key;
75d262c2
VCG
3331}
3332
ca9142b8
JH
3333struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3334 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3335{
3336 struct smp_irk *irk;
3337
3338 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3339 if (!irk) {
3340 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3341 if (!irk)
ca9142b8 3342 return NULL;
970c4e46
JH
3343
3344 bacpy(&irk->bdaddr, bdaddr);
3345 irk->addr_type = addr_type;
3346
3347 list_add(&irk->list, &hdev->identity_resolving_keys);
3348 }
3349
3350 memcpy(irk->val, val, 16);
3351 bacpy(&irk->rpa, rpa);
3352
ca9142b8 3353 return irk;
970c4e46
JH
3354}
3355
55ed8ca1
JH
3356int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3357{
3358 struct link_key *key;
3359
3360 key = hci_find_link_key(hdev, bdaddr);
3361 if (!key)
3362 return -ENOENT;
3363
6ed93dc6 3364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3365
3366 list_del(&key->list);
3367 kfree(key);
3368
3369 return 0;
3370}
3371
e0b2b27e 3372int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 3373{
970d0f1b 3374 struct smp_ltk *k;
c51ffa0b 3375 int removed = 0;
b899efaf 3376
970d0f1b 3377 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 3378 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3379 continue;
3380
6ed93dc6 3381 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 3382
970d0f1b
JH
3383 list_del_rcu(&k->list);
3384 kfree_rcu(k, rcu);
c51ffa0b 3385 removed++;
b899efaf
VCG
3386 }
3387
c51ffa0b 3388 return removed ? 0 : -ENOENT;
b899efaf
VCG
3389}
3390
a7ec7338
JH
3391void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3392{
3393 struct smp_irk *k, *tmp;
3394
668b7b19 3395 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3396 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3397 continue;
3398
3399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3400
3401 list_del(&k->list);
3402 kfree(k);
3403 }
3404}
3405
6bd32326 3406/* HCI command timer function */
65cc2b49 3407static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3408{
65cc2b49
MH
3409 struct hci_dev *hdev = container_of(work, struct hci_dev,
3410 cmd_timer.work);
6bd32326 3411
bda4f23a
AE
3412 if (hdev->sent_cmd) {
3413 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3414 u16 opcode = __le16_to_cpu(sent->opcode);
3415
3416 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3417 } else {
3418 BT_ERR("%s command tx timeout", hdev->name);
3419 }
3420
6bd32326 3421 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3422 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3423}
3424
2763eda6 3425struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3426 bdaddr_t *bdaddr)
2763eda6
SJ
3427{
3428 struct oob_data *data;
3429
3430 list_for_each_entry(data, &hdev->remote_oob_data, list)
3431 if (bacmp(bdaddr, &data->bdaddr) == 0)
3432 return data;
3433
3434 return NULL;
3435}
3436
3437int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3438{
3439 struct oob_data *data;
3440
3441 data = hci_find_remote_oob_data(hdev, bdaddr);
3442 if (!data)
3443 return -ENOENT;
3444
6ed93dc6 3445 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3446
3447 list_del(&data->list);
3448 kfree(data);
3449
3450 return 0;
3451}
3452
35f7498a 3453void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3454{
3455 struct oob_data *data, *n;
3456
3457 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3458 list_del(&data->list);
3459 kfree(data);
3460 }
2763eda6
SJ
3461}
3462
0798872e
MH
3463int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3464 u8 *hash, u8 *randomizer)
2763eda6
SJ
3465{
3466 struct oob_data *data;
3467
3468 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3469 if (!data) {
0a14ab41 3470 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3471 if (!data)
3472 return -ENOMEM;
3473
3474 bacpy(&data->bdaddr, bdaddr);
3475 list_add(&data->list, &hdev->remote_oob_data);
3476 }
3477
519ca9d0
MH
3478 memcpy(data->hash192, hash, sizeof(data->hash192));
3479 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3480
0798872e
MH
3481 memset(data->hash256, 0, sizeof(data->hash256));
3482 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3483
3484 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3485
3486 return 0;
3487}
3488
3489int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3490 u8 *hash192, u8 *randomizer192,
3491 u8 *hash256, u8 *randomizer256)
3492{
3493 struct oob_data *data;
3494
3495 data = hci_find_remote_oob_data(hdev, bdaddr);
3496 if (!data) {
0a14ab41 3497 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3498 if (!data)
3499 return -ENOMEM;
3500
3501 bacpy(&data->bdaddr, bdaddr);
3502 list_add(&data->list, &hdev->remote_oob_data);
3503 }
3504
3505 memcpy(data->hash192, hash192, sizeof(data->hash192));
3506 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3507
3508 memcpy(data->hash256, hash256, sizeof(data->hash256));
3509 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3510
6ed93dc6 3511 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3512
3513 return 0;
3514}
3515
dcc36c16 3516struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3517 bdaddr_t *bdaddr, u8 type)
b2a66aad 3518{
8035ded4 3519 struct bdaddr_list *b;
b2a66aad 3520
dcc36c16 3521 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3522 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3523 return b;
b9ee0a78 3524 }
b2a66aad
AJ
3525
3526 return NULL;
3527}
3528
dcc36c16 3529void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3530{
3531 struct list_head *p, *n;
3532
dcc36c16 3533 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3534 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3535
3536 list_del(p);
3537 kfree(b);
3538 }
b2a66aad
AJ
3539}
3540
dcc36c16 3541int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3542{
3543 struct bdaddr_list *entry;
b2a66aad 3544
b9ee0a78 3545 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3546 return -EBADF;
3547
dcc36c16 3548 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3549 return -EEXIST;
b2a66aad 3550
27f70f3e 3551 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3552 if (!entry)
3553 return -ENOMEM;
b2a66aad
AJ
3554
3555 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3556 entry->bdaddr_type = type;
b2a66aad 3557
dcc36c16 3558 list_add(&entry->list, list);
b2a66aad 3559
2a8357f2 3560 return 0;
b2a66aad
AJ
3561}
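
/* Usage sketch for the list helpers above, e.g. against the LE white
 * list (a minimal sketch; the example_* name is illustrative and error
 * handling is reduced to pass-through).
 */
static int example_whitelist_cycle(struct hci_dev *hdev, bdaddr_t *peer)
{
	int err;

	err = hci_bdaddr_list_add(&hdev->le_white_list, peer,
				  ADDR_LE_DEV_PUBLIC);
	if (err)
		return err;

	return hci_bdaddr_list_del(&hdev->le_white_list, peer,
				   ADDR_LE_DEV_PUBLIC);
}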
3562
dcc36c16 3563int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3564{
3565 struct bdaddr_list *entry;
b2a66aad 3566
35f7498a 3567 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3568 hci_bdaddr_list_clear(list);
35f7498a
JH
3569 return 0;
3570 }
b2a66aad 3571
dcc36c16 3572 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3573 if (!entry)
3574 return -ENOENT;
3575
3576 list_del(&entry->list);
3577 kfree(entry);
3578
3579 return 0;
3580}
3581
15819a70
AG
3582/* This function requires the caller holds hdev->lock */
3583struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3584 bdaddr_t *addr, u8 addr_type)
3585{
3586 struct hci_conn_params *params;
3587
738f6185
JH
3588 /* The conn params list only contains identity addresses */
3589 if (!hci_is_identity_address(addr, addr_type))
3590 return NULL;
3591
15819a70
AG
3592 list_for_each_entry(params, &hdev->le_conn_params, list) {
3593 if (bacmp(&params->addr, addr) == 0 &&
3594 params->addr_type == addr_type) {
3595 return params;
3596 }
3597 }
3598
3599 return NULL;
3600}
3601
cef952ce
AG
3602static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3603{
3604 struct hci_conn *conn;
3605
3606 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3607 if (!conn)
3608 return false;
3609
3610 if (conn->dst_type != type)
3611 return false;
3612
3613 if (conn->state != BT_CONNECTED)
3614 return false;
3615
3616 return true;
3617}
3618
4b10966f 3619/* This function requires the caller holds hdev->lock */
501f8827
JH
3620struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3621 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3622{
912b42ef 3623 struct hci_conn_params *param;
a9b0a04c 3624
738f6185
JH
3625 /* The list only contains identity addresses */
3626 if (!hci_is_identity_address(addr, addr_type))
3627 return NULL;
a9b0a04c 3628
501f8827 3629 list_for_each_entry(param, list, action) {
912b42ef
JH
3630 if (bacmp(&param->addr, addr) == 0 &&
3631 param->addr_type == addr_type)
3632 return param;
4b10966f
MH
3633 }
3634
3635 return NULL;
a9b0a04c
AG
3636}
3637
15819a70 3638/* This function requires the caller holds hdev->lock */
51d167c0
MH
3639struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3640 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3641{
3642 struct hci_conn_params *params;
3643
c46245b3 3644 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3645 return NULL;
a9b0a04c 3646
15819a70 3647 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3648 if (params)
51d167c0 3649 return params;
15819a70
AG
3650
3651 params = kzalloc(sizeof(*params), GFP_KERNEL);
3652 if (!params) {
3653 BT_ERR("Out of memory");
51d167c0 3654 return NULL;
15819a70
AG
3655 }
3656
3657 bacpy(&params->addr, addr);
3658 params->addr_type = addr_type;
cef952ce
AG
3659
3660 list_add(&params->list, &hdev->le_conn_params);
93450c75 3661 INIT_LIST_HEAD(&params->action);
cef952ce 3662
bf5b3c8b
MH
3663 params->conn_min_interval = hdev->le_conn_min_interval;
3664 params->conn_max_interval = hdev->le_conn_max_interval;
3665 params->conn_latency = hdev->le_conn_latency;
3666 params->supervision_timeout = hdev->le_supv_timeout;
3667 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3668
3669 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3670
51d167c0 3671 return params;
bf5b3c8b
MH
3672}
3673
3674/* This function requires the caller holds hdev->lock */
3675int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3676 u8 auto_connect)
15819a70
AG
3677{
3678 struct hci_conn_params *params;
3679
8c87aae1
MH
3680 params = hci_conn_params_add(hdev, addr, addr_type);
3681 if (!params)
3682 return -EIO;
cef952ce 3683
42ce26de
JH
3684 if (params->auto_connect == auto_connect)
3685 return 0;
3686
95305baa 3687 list_del_init(&params->action);
15819a70 3688
cef952ce
AG
3689 switch (auto_connect) {
3690 case HCI_AUTO_CONN_DISABLED:
3691 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3692 hci_update_background_scan(hdev);
cef952ce 3693 break;
851efca8 3694 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3695 list_add(&params->action, &hdev->pend_le_reports);
3696 hci_update_background_scan(hdev);
cef952ce 3697 break;
4b9e7e75 3698 case HCI_AUTO_CONN_DIRECT:
cef952ce 3699 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3700 if (!is_connected(hdev, addr, addr_type)) {
3701 list_add(&params->action, &hdev->pend_le_conns);
3702 hci_update_background_scan(hdev);
3703 }
cef952ce
AG
3704 break;
3705 }
15819a70 3706
851efca8
JH
3707 params->auto_connect = auto_connect;
3708
d06b50ce
MH
3709 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3710 auto_connect);
a9b0a04c
AG
3711
3712 return 0;
15819a70
AG
3713}
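
/* Illustrative caller for the helper above (hdev->lock must be held, as
 * with the other conn_params helpers; the example_* name is an
 * assumption): mark a peer for automatic connection establishment.
 */
static int example_autoconnect(struct hci_dev *hdev, bdaddr_t *peer)
{
	return hci_conn_params_set(hdev, peer, ADDR_LE_DEV_PUBLIC,
				   HCI_AUTO_CONN_ALWAYS);
}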
3714
f6c63249 3715static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3716{
f8aaf9b6 3717 if (params->conn) {
f161dd41 3718 hci_conn_drop(params->conn);
f8aaf9b6
JH
3719 hci_conn_put(params->conn);
3720 }
f161dd41 3721
95305baa 3722 list_del(&params->action);
15819a70
AG
3723 list_del(&params->list);
3724 kfree(params);
f6c63249
JH
3725}
3726
3727/* This function requires the caller holds hdev->lock */
3728void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3729{
3730 struct hci_conn_params *params;
3731
3732 params = hci_conn_params_lookup(hdev, addr, addr_type);
3733 if (!params)
3734 return;
3735
3736 hci_conn_params_free(params);
15819a70 3737
95305baa
JH
3738 hci_update_background_scan(hdev);
3739
15819a70
AG
3740 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3741}
3742
3743/* This function requires the caller holds hdev->lock */
55af49a8 3744void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3745{
3746 struct hci_conn_params *params, *tmp;
3747
3748 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3749 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3750 continue;
15819a70
AG
3751 list_del(&params->list);
3752 kfree(params);
3753 }
3754
55af49a8 3755 BT_DBG("All disabled LE connection parameters were removed");
77a77a30
AG
3756}
3757
3758/* This function requires the caller to hold hdev->lock */
373110c5 3759void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3760{
15819a70 3761 struct hci_conn_params *params, *tmp;
77a77a30 3762
f6c63249
JH
3763 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3764 hci_conn_params_free(params);
77a77a30 3765
a4790dbd 3766 hci_update_background_scan(hdev);
77a77a30 3767
15819a70 3768 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3769}
3770
4c87eaab 3771static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3772{
4c87eaab
AG
3773 if (status) {
3774 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3775
4c87eaab
AG
3776 hci_dev_lock(hdev);
3777 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3778 hci_dev_unlock(hdev);
3779 return;
3780 }
7ba8b4be
AG
3781}
3782
4c87eaab 3783static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3784{
4c87eaab
AG
3785 /* General inquiry access code (GIAC) */
3786 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3787 struct hci_request req;
3788 struct hci_cp_inquiry cp;
7ba8b4be
AG
3789 int err;
3790
4c87eaab
AG
3791 if (status) {
3792 BT_ERR("Failed to disable LE scanning: status %d", status);
3793 return;
3794 }
7ba8b4be 3795
4c87eaab
AG
3796 switch (hdev->discovery.type) {
3797 case DISCOV_TYPE_LE:
3798 hci_dev_lock(hdev);
3799 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3800 hci_dev_unlock(hdev);
3801 break;
7ba8b4be 3802
4c87eaab
AG
3803 case DISCOV_TYPE_INTERLEAVED:
3804 hci_req_init(&req, hdev);
7ba8b4be 3805
4c87eaab
AG
3806 memset(&cp, 0, sizeof(cp));
3807 memcpy(&cp.lap, lap, sizeof(cp.lap));
3808 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3809 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3810
4c87eaab 3811 hci_dev_lock(hdev);
7dbfac1d 3812
4c87eaab 3813 hci_inquiry_cache_flush(hdev);
7dbfac1d 3814
4c87eaab
AG
3815 err = hci_req_run(&req, inquiry_complete);
3816 if (err) {
3817 BT_ERR("Inquiry request failed: err %d", err);
3818 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3819 }
7dbfac1d 3820
4c87eaab
AG
3821 hci_dev_unlock(hdev);
3822 break;
7dbfac1d 3823 }
7dbfac1d
AG
3824}
3825
7ba8b4be
AG
3826static void le_scan_disable_work(struct work_struct *work)
3827{
3828 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3829 le_scan_disable.work);
4c87eaab
AG
3830 struct hci_request req;
3831 int err;
7ba8b4be
AG
3832
3833 BT_DBG("%s", hdev->name);
3834
4c87eaab 3835 hci_req_init(&req, hdev);
28b75a89 3836
b1efcc28 3837 hci_req_add_le_scan_disable(&req);
28b75a89 3838
4c87eaab
AG
3839 err = hci_req_run(&req, le_scan_disable_work_complete);
3840 if (err)
3841 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3842}
3843
8d97250e
JH
3844static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3845{
3846 struct hci_dev *hdev = req->hdev;
3847
3848 /* If we're advertising or initiating an LE connection we can't
3849 * go ahead and change the random address at this time. This is
3850 * because the eventual initiator address used for the
3851 * subsequently created connection will be undefined (some
3852 * controllers use the new address and others the one we had
3853 * when the operation started).
3854 *
3855 * In this kind of scenario, skip the update and let the random
3856 * address be updated at the next cycle.
3857 */
5ce194c4 3858 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3859 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3860 BT_DBG("Deferring random address update");
9a783a13 3861 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3862 return;
3863 }
3864
3865 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3866}
3867
94b1fc92
MH
3868int hci_update_random_address(struct hci_request *req, bool require_privacy,
3869 u8 *own_addr_type)
ebd3a747
JH
3870{
3871 struct hci_dev *hdev = req->hdev;
3872 int err;
3873
3874 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
3875 * the current RPA has expired or something other than the
3876 * current RPA is in use, then generate a new one.
ebd3a747
JH
3877 */
3878 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3879 int to;
3880
3881 *own_addr_type = ADDR_LE_DEV_RANDOM;
3882
3883 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3884 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3885 return 0;
3886
defce9e8 3887 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3888 if (err < 0) {
3889 BT_ERR("%s failed to generate new RPA", hdev->name);
3890 return err;
3891 }
3892
8d97250e 3893 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3894
3895 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3896 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3897
3898 return 0;
94b1fc92
MH
3899 }
3900
3901 /* In case of required privacy without a resolvable private address,
3902 * use an unresolvable private address. This is useful for active
3903 * scanning and non-connectable advertising.
3904 */
3905 if (require_privacy) {
3906 bdaddr_t urpa;
3907
3908 get_random_bytes(&urpa, 6);
3909 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3910
3911 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3912 set_random_addr(req, &urpa);
94b1fc92 3913 return 0;
ebd3a747
JH
3914 }
3915
3916 /* If forcing the static address is in use or there is no public
3917 * address, use the static address as the random address (but skip
3918 * the HCI command if the current random address is already the
3919 * static one).
3920 */
111902f7 3921 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3922 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3923 *own_addr_type = ADDR_LE_DEV_RANDOM;
3924 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3925 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3926 &hdev->static_addr);
3927 return 0;
3928 }
3929
3930 /* Neither privacy nor a static address is being used, so use a
3931 * public address.
3932 */
3933 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3934
3935 return 0;
3936}
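/* Usage sketch (not part of the original file): request builders call
 * hci_update_random_address() to get a suitable own-address type
 * before queueing LE scan or advertising commands, as
 * hci_req_add_le_passive_scan() does further below. The helper name
 * example_pick_own_address is hypothetical.
 */
static int example_pick_own_address(struct hci_request *req)
{
	u8 own_addr_type;
	int err;

	/* require_privacy == false: fall back to the identity address
	 * when privacy is not enabled.
	 */
	err = hci_update_random_address(req, false, &own_addr_type);
	if (err < 0)
		return err;

	/* own_addr_type now holds ADDR_LE_DEV_PUBLIC or
	 * ADDR_LE_DEV_RANDOM and can be copied into the parameter
	 * structure of the command being built.
	 */
	return 0;
}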
3937
a1f4c318
JH
3938/* Copy the Identity Address of the controller.
3939 *
3940 * If the controller has a public BD_ADDR, then by default use that one.
3941 * If this is an LE-only controller without a public address, default to
3942 * the static random address.
3943 *
3944 * For debugging purposes it is possible to force controllers with a
3945 * public address to use the static random address instead.
3946 */
3947void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3948 u8 *bdaddr_type)
3949{
111902f7 3950 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3951 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3952 bacpy(bdaddr, &hdev->static_addr);
3953 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3954 } else {
3955 bacpy(bdaddr, &hdev->bdaddr);
3956 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3957 }
3958}
3959
9be0dab7
DH
3960/* Alloc HCI device */
3961struct hci_dev *hci_alloc_dev(void)
3962{
3963 struct hci_dev *hdev;
3964
27f70f3e 3965 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3966 if (!hdev)
3967 return NULL;
3968
b1b813d4
DH
3969 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3970 hdev->esco_type = (ESCO_HV1);
3971 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3972 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3973 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3974 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3975 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3976 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3977
b1b813d4
DH
3978 hdev->sniff_max_interval = 800;
3979 hdev->sniff_min_interval = 80;
3980
3f959d46 3981 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3982 hdev->le_adv_min_interval = 0x0800;
3983 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3984 hdev->le_scan_interval = 0x0060;
3985 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3986 hdev->le_conn_min_interval = 0x0028;
3987 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3988 hdev->le_conn_latency = 0x0000;
3989 hdev->le_supv_timeout = 0x002a;
bef64738 3990
d6bfd59c 3991 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3992 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3993 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3994 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3995
b1b813d4
DH
3996 mutex_init(&hdev->lock);
3997 mutex_init(&hdev->req_lock);
3998
3999 INIT_LIST_HEAD(&hdev->mgmt_pending);
4000 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4001 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4002 INIT_LIST_HEAD(&hdev->uuids);
4003 INIT_LIST_HEAD(&hdev->link_keys);
4004 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4005 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4006 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4007 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4008 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4009 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4010 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4011 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4012
4013 INIT_WORK(&hdev->rx_work, hci_rx_work);
4014 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4015 INIT_WORK(&hdev->tx_work, hci_tx_work);
4016 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4017
b1b813d4
DH
4018 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4019 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4020 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4021
b1b813d4
DH
4022 skb_queue_head_init(&hdev->rx_q);
4023 skb_queue_head_init(&hdev->cmd_q);
4024 skb_queue_head_init(&hdev->raw_q);
4025
4026 init_waitqueue_head(&hdev->req_wait_q);
4027
65cc2b49 4028 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4029
b1b813d4
DH
4030 hci_init_sysfs(hdev);
4031 discovery_init(hdev);
9be0dab7
DH
4032
4033 return hdev;
4034}
4035EXPORT_SYMBOL(hci_alloc_dev);
4036
4037/* Free HCI device */
4038void hci_free_dev(struct hci_dev *hdev)
4039{
9be0dab7
DH
4040 /* will free via device release */
4041 put_device(&hdev->dev);
4042}
4043EXPORT_SYMBOL(hci_free_dev);
4044
1da177e4
LT
4045/* Register HCI device */
4046int hci_register_dev(struct hci_dev *hdev)
4047{
b1b813d4 4048 int id, error;
1da177e4 4049
74292d5a 4050 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4051 return -EINVAL;
4052
08add513
MM
4053 /* Do not allow HCI_AMP devices to register at index 0,
4054 * so the index can be used as the AMP controller ID.
4055 */
3df92b31
SL
4056 switch (hdev->dev_type) {
4057 case HCI_BREDR:
4058 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4059 break;
4060 case HCI_AMP:
4061 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4062 break;
4063 default:
4064 return -EINVAL;
1da177e4 4065 }
8e87d142 4066
3df92b31
SL
4067 if (id < 0)
4068 return id;
4069
1da177e4
LT
4070 sprintf(hdev->name, "hci%d", id);
4071 hdev->id = id;
2d8b3a11
AE
4072
4073 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4074
d8537548
KC
4075 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4076 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4077 if (!hdev->workqueue) {
4078 error = -ENOMEM;
4079 goto err;
4080 }
f48fd9c8 4081
d8537548
KC
4082 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4083 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4084 if (!hdev->req_workqueue) {
4085 destroy_workqueue(hdev->workqueue);
4086 error = -ENOMEM;
4087 goto err;
4088 }
4089
0153e2ec
MH
4090 if (!IS_ERR_OR_NULL(bt_debugfs))
4091 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4092
bdc3e0f1
MH
4093 dev_set_name(&hdev->dev, "%s", hdev->name);
4094
4095 error = device_add(&hdev->dev);
33ca954d 4096 if (error < 0)
54506918 4097 goto err_wqueue;
1da177e4 4098
611b30f7 4099 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4100 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4101 hdev);
611b30f7
MH
4102 if (hdev->rfkill) {
4103 if (rfkill_register(hdev->rfkill) < 0) {
4104 rfkill_destroy(hdev->rfkill);
4105 hdev->rfkill = NULL;
4106 }
4107 }
4108
5e130367
JH
4109 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4110 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4111
a8b2d5c2 4112 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4113 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4114
01cd3404 4115 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4116 /* Assume BR/EDR support until proven otherwise (such as
4117 * through reading supported features during init).
4118 */
4119 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4120 }
ce2be9ac 4121
fcee3377
GP
4122 write_lock(&hci_dev_list_lock);
4123 list_add(&hdev->list, &hci_dev_list);
4124 write_unlock(&hci_dev_list_lock);
4125
4a964404
MH
4126 /* Devices that are marked for raw-only usage are unconfigured
4127 * and should not be included in normal operation.
fee746b0
MH
4128 */
4129 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4130 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4131
1da177e4 4132 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4133 hci_dev_hold(hdev);
1da177e4 4134
19202573 4135 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4136
1da177e4 4137 return id;
f48fd9c8 4138
33ca954d
DH
4139err_wqueue:
4140 destroy_workqueue(hdev->workqueue);
6ead1bbc 4141 destroy_workqueue(hdev->req_workqueue);
33ca954d 4142err:
3df92b31 4143 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4144
33ca954d 4145 return error;
1da177e4
LT
4146}
4147EXPORT_SYMBOL(hci_register_dev);
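/* Driver-side sketch (not part of the original file): the minimal
 * registration sequence a transport driver performs with the API
 * above. All example_* names are hypothetical driver callbacks; a
 * real driver would also tear the device down on module removal.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}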
4148
4149/* Unregister HCI device */
59735631 4150void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4151{
3df92b31 4152 int i, id;
ef222013 4153
c13854ce 4154 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4155
94324962
JH
4156 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4157
3df92b31
SL
4158 id = hdev->id;
4159
f20d09d5 4160 write_lock(&hci_dev_list_lock);
1da177e4 4161 list_del(&hdev->list);
f20d09d5 4162 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4163
4164 hci_dev_do_close(hdev);
4165
cd4c5391 4166 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4167 kfree_skb(hdev->reassembly[i]);
4168
b9b5ef18
GP
4169 cancel_work_sync(&hdev->power_on);
4170
ab81cbf9 4171 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4172 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4173 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4174 hci_dev_lock(hdev);
744cf19e 4175 mgmt_index_removed(hdev);
09fd0de5 4176 hci_dev_unlock(hdev);
56e5cb86 4177 }
ab81cbf9 4178
2e58ef3e
JH
4179 /* mgmt_index_removed should take care of emptying the
4180 * pending list */
4181 BUG_ON(!list_empty(&hdev->mgmt_pending));
4182
1da177e4
LT
4183 hci_notify(hdev, HCI_DEV_UNREG);
4184
611b30f7
MH
4185 if (hdev->rfkill) {
4186 rfkill_unregister(hdev->rfkill);
4187 rfkill_destroy(hdev->rfkill);
4188 }
4189
711eafe3 4190 smp_unregister(hdev);
99780a7b 4191
bdc3e0f1 4192 device_del(&hdev->dev);
147e2d59 4193
0153e2ec
MH
4194 debugfs_remove_recursive(hdev->debugfs);
4195
f48fd9c8 4196 destroy_workqueue(hdev->workqueue);
6ead1bbc 4197 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4198
09fd0de5 4199 hci_dev_lock(hdev);
dcc36c16 4200 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4201 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4202 hci_uuids_clear(hdev);
55ed8ca1 4203 hci_link_keys_clear(hdev);
b899efaf 4204 hci_smp_ltks_clear(hdev);
970c4e46 4205 hci_smp_irks_clear(hdev);
2763eda6 4206 hci_remote_oob_data_clear(hdev);
dcc36c16 4207 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4208 hci_conn_params_clear_all(hdev);
09fd0de5 4209 hci_dev_unlock(hdev);
e2e0cacb 4210
dc946bd8 4211 hci_dev_put(hdev);
3df92b31
SL
4212
4213 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4214}
4215EXPORT_SYMBOL(hci_unregister_dev);
4216
4217/* Suspend HCI device */
4218int hci_suspend_dev(struct hci_dev *hdev)
4219{
4220 hci_notify(hdev, HCI_DEV_SUSPEND);
4221 return 0;
4222}
4223EXPORT_SYMBOL(hci_suspend_dev);
4224
4225/* Resume HCI device */
4226int hci_resume_dev(struct hci_dev *hdev)
4227{
4228 hci_notify(hdev, HCI_DEV_RESUME);
4229 return 0;
4230}
4231EXPORT_SYMBOL(hci_resume_dev);
4232
75e0569f
MH
4233/* Reset HCI device */
4234int hci_reset_dev(struct hci_dev *hdev)
4235{
4236 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4237 struct sk_buff *skb;
4238
4239 skb = bt_skb_alloc(3, GFP_ATOMIC);
4240 if (!skb)
4241 return -ENOMEM;
4242
4243 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4244 memcpy(skb_put(skb, 3), hw_err, 3);
4245
4246 /* Send Hardware Error to upper stack */
4247 return hci_recv_frame(hdev, skb);
4248}
4249EXPORT_SYMBOL(hci_reset_dev);
4250
76bca880 4251/* Receive frame from HCI drivers */
e1a26170 4252int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4253{
76bca880 4254 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4255 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4256 kfree_skb(skb);
4257 return -ENXIO;
4258 }
4259
d82603c6 4260 /* Incoming skb */
76bca880
MH
4261 bt_cb(skb)->incoming = 1;
4262
4263 /* Time stamp */
4264 __net_timestamp(skb);
4265
76bca880 4266 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4267 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4268
76bca880
MH
4269 return 0;
4270}
4271EXPORT_SYMBOL(hci_recv_frame);
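/* Sketch (not part of the original file): how a driver's receive path
 * hands a complete frame to the core, mirroring hci_reset_dev() above.
 * example_deliver_event is a hypothetical helper name.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}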
4272
33e882a5 4273static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4274 int count, __u8 index)
33e882a5
SS
4275{
4276 int len = 0;
4277 int hlen = 0;
4278 int remain = count;
4279 struct sk_buff *skb;
4280 struct bt_skb_cb *scb;
4281
4282 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4283 index >= NUM_REASSEMBLY)
33e882a5
SS
4284 return -EILSEQ;
4285
4286 skb = hdev->reassembly[index];
4287
4288 if (!skb) {
4289 switch (type) {
4290 case HCI_ACLDATA_PKT:
4291 len = HCI_MAX_FRAME_SIZE;
4292 hlen = HCI_ACL_HDR_SIZE;
4293 break;
4294 case HCI_EVENT_PKT:
4295 len = HCI_MAX_EVENT_SIZE;
4296 hlen = HCI_EVENT_HDR_SIZE;
4297 break;
4298 case HCI_SCODATA_PKT:
4299 len = HCI_MAX_SCO_SIZE;
4300 hlen = HCI_SCO_HDR_SIZE;
4301 break;
4302 }
4303
1e429f38 4304 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4305 if (!skb)
4306 return -ENOMEM;
4307
4308 scb = (void *) skb->cb;
4309 scb->expect = hlen;
4310 scb->pkt_type = type;
4311
33e882a5
SS
4312 hdev->reassembly[index] = skb;
4313 }
4314
4315 while (count) {
4316 scb = (void *) skb->cb;
89bb46d0 4317 len = min_t(uint, scb->expect, count);
33e882a5
SS
4318
4319 memcpy(skb_put(skb, len), data, len);
4320
4321 count -= len;
4322 data += len;
4323 scb->expect -= len;
4324 remain = count;
4325
4326 switch (type) {
4327 case HCI_EVENT_PKT:
4328 if (skb->len == HCI_EVENT_HDR_SIZE) {
4329 struct hci_event_hdr *h = hci_event_hdr(skb);
4330 scb->expect = h->plen;
4331
4332 if (skb_tailroom(skb) < scb->expect) {
4333 kfree_skb(skb);
4334 hdev->reassembly[index] = NULL;
4335 return -ENOMEM;
4336 }
4337 }
4338 break;
4339
4340 case HCI_ACLDATA_PKT:
4341 if (skb->len == HCI_ACL_HDR_SIZE) {
4342 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4343 scb->expect = __le16_to_cpu(h->dlen);
4344
4345 if (skb_tailroom(skb) < scb->expect) {
4346 kfree_skb(skb);
4347 hdev->reassembly[index] = NULL;
4348 return -ENOMEM;
4349 }
4350 }
4351 break;
4352
4353 case HCI_SCODATA_PKT:
4354 if (skb->len == HCI_SCO_HDR_SIZE) {
4355 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4356 scb->expect = h->dlen;
4357
4358 if (skb_tailroom(skb) < scb->expect) {
4359 kfree_skb(skb);
4360 hdev->reassembly[index] = NULL;
4361 return -ENOMEM;
4362 }
4363 }
4364 break;
4365 }
4366
4367 if (scb->expect == 0) {
4368 /* Complete frame */
4369
4370 bt_cb(skb)->pkt_type = type;
e1a26170 4371 hci_recv_frame(hdev, skb);
33e882a5
SS
4372
4373 hdev->reassembly[index] = NULL;
4374 return remain;
4375 }
4376 }
4377
4378 return remain;
4379}
4380
99811510
SS
4381#define STREAM_REASSEMBLY 0
4382
4383int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4384{
4385 int type;
4386 int rem = 0;
4387
da5f6c37 4388 while (count) {
99811510
SS
4389 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4390
4391 if (!skb) {
4392 struct { char type; } *pkt;
4393
4394 /* Start of the frame */
4395 pkt = data;
4396 type = pkt->type;
4397
4398 data++;
4399 count--;
4400 } else
4401 type = bt_cb(skb)->pkt_type;
4402
1e429f38 4403 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4404 STREAM_REASSEMBLY);
99811510
SS
4405 if (rem < 0)
4406 return rem;
4407
4408 data += (count - rem);
4409 count = rem;
f81c6224 4410 }
99811510
SS
4411
4412 return rem;
4413}
4414EXPORT_SYMBOL(hci_recv_stream_fragment);
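/* Sketch (not part of the original file): a byte-stream transport
 * (e.g. a UART line discipline) can feed raw bytes straight into the
 * stream reassembler; packet boundaries are recovered from the H:4
 * packet type byte at the start of each frame. example_stream_rx is
 * a hypothetical name.
 */
static void example_stream_rx(struct hci_dev *hdev, void *data, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, data, count);
	if (err < 0)
		BT_ERR("Stream reassembly failed: err %d", err);
}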
4415
1da177e4
LT
4416/* ---- Interface to upper protocols ---- */
4417
1da177e4
LT
4418int hci_register_cb(struct hci_cb *cb)
4419{
4420 BT_DBG("%p name %s", cb, cb->name);
4421
f20d09d5 4422 write_lock(&hci_cb_list_lock);
1da177e4 4423 list_add(&cb->list, &hci_cb_list);
f20d09d5 4424 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4425
4426 return 0;
4427}
4428EXPORT_SYMBOL(hci_register_cb);
4429
4430int hci_unregister_cb(struct hci_cb *cb)
4431{
4432 BT_DBG("%p name %s", cb, cb->name);
4433
f20d09d5 4434 write_lock(&hci_cb_list_lock);
1da177e4 4435 list_del(&cb->list);
f20d09d5 4436 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4437
4438 return 0;
4439}
4440EXPORT_SYMBOL(hci_unregister_cb);
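/* Usage sketch (not part of the original file): an upper protocol
 * registers a callback block once at module init. Only the name field
 * is shown; the notification hooks a real protocol would fill in are
 * omitted here, and all example_* names are hypothetical.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_cb_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_cb_exit(void)
{
	hci_unregister_cb(&example_cb);
}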
4441
51086991 4442static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4443{
cdc52faa
MH
4444 int err;
4445
0d48d939 4446 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4447
cd82e61c
MH
4448 /* Time stamp */
4449 __net_timestamp(skb);
1da177e4 4450
cd82e61c
MH
4451 /* Send copy to monitor */
4452 hci_send_to_monitor(hdev, skb);
4453
4454 if (atomic_read(&hdev->promisc)) {
4455 /* Send copy to the sockets */
470fe1b5 4456 hci_send_to_sock(hdev, skb);
1da177e4
LT
4457 }
4458
4459 /* Get rid of the skb owner prior to sending to the driver. */
4460 skb_orphan(skb);
4461
cdc52faa
MH
4462 err = hdev->send(hdev, skb);
4463 if (err < 0) {
4464 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4465 kfree_skb(skb);
4466 }
1da177e4
LT
4467}
4468
3119ae95
JH
4469void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4470{
4471 skb_queue_head_init(&req->cmd_q);
4472 req->hdev = hdev;
5d73e034 4473 req->err = 0;
3119ae95
JH
4474}
4475
4476int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4477{
4478 struct hci_dev *hdev = req->hdev;
4479 struct sk_buff *skb;
4480 unsigned long flags;
4481
4482 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4483
49c922bb 4484 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4485 * commands queued on the HCI request queue.
4486 */
4487 if (req->err) {
4488 skb_queue_purge(&req->cmd_q);
4489 return req->err;
4490 }
4491
3119ae95
JH
4492 /* Do not allow empty requests */
4493 if (skb_queue_empty(&req->cmd_q))
382b0c39 4494 return -ENODATA;
3119ae95
JH
4495
4496 skb = skb_peek_tail(&req->cmd_q);
4497 bt_cb(skb)->req.complete = complete;
4498
4499 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4500 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4501 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4502
4503 queue_work(hdev->workqueue, &hdev->cmd_work);
4504
4505 return 0;
4506}
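/* Usage sketch (not part of the original file): building and running
 * an asynchronous request with the helpers above. The completion
 * callback runs once the last command in the request completes.
 * example_send_reset is a hypothetical name.
 */
static int example_send_reset(struct hci_dev *hdev,
			      hci_req_complete_t complete)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, complete);
}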
4507
899de765
MH
4508bool hci_req_pending(struct hci_dev *hdev)
4509{
4510 return (hdev->req_status == HCI_REQ_PEND);
4511}
4512
1ca3a9d0 4513static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4514 u32 plen, const void *param)
1da177e4
LT
4515{
4516 int len = HCI_COMMAND_HDR_SIZE + plen;
4517 struct hci_command_hdr *hdr;
4518 struct sk_buff *skb;
4519
1da177e4 4520 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4521 if (!skb)
4522 return NULL;
1da177e4
LT
4523
4524 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4525 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4526 hdr->plen = plen;
4527
4528 if (plen)
4529 memcpy(skb_put(skb, plen), param, plen);
4530
4531 BT_DBG("skb len %d", skb->len);
4532
0d48d939 4533 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4534 bt_cb(skb)->opcode = opcode;
c78ae283 4535
1ca3a9d0
JH
4536 return skb;
4537}
4538
4539/* Send HCI command */
07dc93dd
JH
4540int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4541 const void *param)
1ca3a9d0
JH
4542{
4543 struct sk_buff *skb;
4544
4545 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4546
4547 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4548 if (!skb) {
4549 BT_ERR("%s no memory for command", hdev->name);
4550 return -ENOMEM;
4551 }
4552
49c922bb 4553 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4554 * single-command requests.
4555 */
4556 bt_cb(skb)->req.start = true;
4557
1da177e4 4558 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4559 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4560
4561 return 0;
4562}
1da177e4 4563
71c76a17 4564/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4565void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4566 const void *param, u8 event)
71c76a17
JH
4567{
4568 struct hci_dev *hdev = req->hdev;
4569 struct sk_buff *skb;
4570
4571 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4572
49c922bb 4573 /* If an error occurred during request building, there is no point in
34739c1e
AG
4574 * queueing the HCI command. We can simply return.
4575 */
4576 if (req->err)
4577 return;
4578
71c76a17
JH
4579 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4580 if (!skb) {
5d73e034
AG
4581 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4582 hdev->name, opcode);
4583 req->err = -ENOMEM;
e348fe6b 4584 return;
71c76a17
JH
4585 }
4586
4587 if (skb_queue_empty(&req->cmd_q))
4588 bt_cb(skb)->req.start = true;
4589
02350a72
JH
4590 bt_cb(skb)->req.event = event;
4591
71c76a17 4592 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4593}
4594
07dc93dd
JH
4595void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4596 const void *param)
02350a72
JH
4597{
4598 hci_req_add_ev(req, opcode, plen, param, 0);
4599}
4600
1da177e4 4601/* Get data from the previously sent command */
a9de9248 4602void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4603{
4604 struct hci_command_hdr *hdr;
4605
4606 if (!hdev->sent_cmd)
4607 return NULL;
4608
4609 hdr = (void *) hdev->sent_cmd->data;
4610
a9de9248 4611 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4612 return NULL;
4613
f0e09510 4614 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4615
4616 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4617}
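/* Sketch (not part of the original file): a command-complete handler
 * can recover the parameters the command was sent with, e.g. the
 * single enable byte of HCI_OP_WRITE_SCAN_ENABLE. The handler name
 * is hypothetical.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev)
{
	u8 *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	BT_DBG("%s scan enable param 0x%2.2x", hdev->name, *sent);
}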
4618
4619/* Send ACL data */
4620static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4621{
4622 struct hci_acl_hdr *hdr;
4623 int len = skb->len;
4624
badff6d0
ACM
4625 skb_push(skb, HCI_ACL_HDR_SIZE);
4626 skb_reset_transport_header(skb);
9c70220b 4627 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4628 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4629 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4630}
4631
ee22be7e 4632static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4633 struct sk_buff *skb, __u16 flags)
1da177e4 4634{
ee22be7e 4635 struct hci_conn *conn = chan->conn;
1da177e4
LT
4636 struct hci_dev *hdev = conn->hdev;
4637 struct sk_buff *list;
4638
087bfd99
GP
4639 skb->len = skb_headlen(skb);
4640 skb->data_len = 0;
4641
4642 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4643
4644 switch (hdev->dev_type) {
4645 case HCI_BREDR:
4646 hci_add_acl_hdr(skb, conn->handle, flags);
4647 break;
4648 case HCI_AMP:
4649 hci_add_acl_hdr(skb, chan->handle, flags);
4650 break;
4651 default:
4652 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4653 return;
4654 }
087bfd99 4655
70f23020
AE
4656 list = skb_shinfo(skb)->frag_list;
4657 if (!list) {
1da177e4
LT
4658 /* Non-fragmented */
4659 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4660
73d80deb 4661 skb_queue_tail(queue, skb);
1da177e4
LT
4662 } else {
4663 /* Fragmented */
4664 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4665
4666 skb_shinfo(skb)->frag_list = NULL;
4667
9cfd5a23
JR
4668 /* Queue all fragments atomically. We need to use spin_lock_bh
4669 * here because with 6LoWPAN links this function can be called
4670 * from softirq context, and using a normal spin lock could
4671 * cause deadlocks.
4672 */
4673 spin_lock_bh(&queue->lock);
1da177e4 4674
73d80deb 4675 __skb_queue_tail(queue, skb);
e702112f
AE
4676
4677 flags &= ~ACL_START;
4678 flags |= ACL_CONT;
1da177e4
LT
4679 do {
4680 skb = list; list = list->next;
8e87d142 4681
0d48d939 4682 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4683 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4684
4685 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4686
73d80deb 4687 __skb_queue_tail(queue, skb);
1da177e4
LT
4688 } while (list);
4689
9cfd5a23 4690 spin_unlock_bh(&queue->lock);
1da177e4 4691 }
73d80deb
LAD
4692}
4693
4694void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4695{
ee22be7e 4696 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4697
f0e09510 4698 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4699
ee22be7e 4700 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4701
3eff45ea 4702 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4703}
1da177e4
LT
4704
4705/* Send SCO data */
0d861d8b 4706void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4707{
4708 struct hci_dev *hdev = conn->hdev;
4709 struct hci_sco_hdr hdr;
4710
4711 BT_DBG("%s len %d", hdev->name, skb->len);
4712
aca3192c 4713 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4714 hdr.dlen = skb->len;
4715
badff6d0
ACM
4716 skb_push(skb, HCI_SCO_HDR_SIZE);
4717 skb_reset_transport_header(skb);
9c70220b 4718 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4719
0d48d939 4720 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4721
1da177e4 4722 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4723 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4724}
1da177e4
LT
4725
4726/* ---- HCI TX task (outgoing data) ---- */
4727
4728/* HCI Connection scheduler */
6039aa73
GP
4729static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4730 int *quote)
1da177e4
LT
4731{
4732 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4733 struct hci_conn *conn = NULL, *c;
abc5de8f 4734 unsigned int num = 0, min = ~0;
1da177e4 4735
8e87d142 4736 /* We don't have to lock the device here. Connections are always
1da177e4 4737 * added and removed with the TX task disabled. */
bf4c6325
GP
4738
4739 rcu_read_lock();
4740
4741 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4742 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4743 continue;
769be974
MH
4744
4745 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4746 continue;
4747
1da177e4
LT
4748 num++;
4749
4750 if (c->sent < min) {
4751 min = c->sent;
4752 conn = c;
4753 }
52087a79
LAD
4754
4755 if (hci_conn_num(hdev, type) == num)
4756 break;
1da177e4
LT
4757 }
4758
bf4c6325
GP
4759 rcu_read_unlock();
4760
1da177e4 4761 if (conn) {
6ed58ec5
VT
4762 int cnt, q;
4763
4764 switch (conn->type) {
4765 case ACL_LINK:
4766 cnt = hdev->acl_cnt;
4767 break;
4768 case SCO_LINK:
4769 case ESCO_LINK:
4770 cnt = hdev->sco_cnt;
4771 break;
4772 case LE_LINK:
4773 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4774 break;
4775 default:
4776 cnt = 0;
4777 BT_ERR("Unknown link type");
4778 }
4779
4780 q = cnt / num;
1da177e4
LT
4781 *quote = q ? q : 1;
4782 } else
4783 *quote = 0;
4784
4785 BT_DBG("conn %p quote %d", conn, *quote);
4786 return conn;
4787}
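/* Worked example (not part of the original file): with hdev->acl_cnt
 * of 8 free buffers shared by num == 3 busy ACL connections, the
 * least-used connection gets a quote of 8 / 3 == 2 packets for this
 * scheduling round; a zero quotient is rounded up to 1 so that the
 * selected connection always makes progress.
 */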
4788
6039aa73 4789static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4790{
4791 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4792 struct hci_conn *c;
1da177e4 4793
bae1f5d9 4794 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4795
bf4c6325
GP
4796 rcu_read_lock();
4797
1da177e4 4798 /* Kill stalled connections */
bf4c6325 4799 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4800 if (c->type == type && c->sent) {
6ed93dc6
AE
4801 BT_ERR("%s killing stalled connection %pMR",
4802 hdev->name, &c->dst);
bed71748 4803 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4804 }
4805 }
bf4c6325
GP
4806
4807 rcu_read_unlock();
1da177e4
LT
4808}
4809
6039aa73
GP
4810static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4811 int *quote)
1da177e4 4812{
73d80deb
LAD
4813 struct hci_conn_hash *h = &hdev->conn_hash;
4814 struct hci_chan *chan = NULL;
abc5de8f 4815 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4816 struct hci_conn *conn;
73d80deb
LAD
4817 int cnt, q, conn_num = 0;
4818
4819 BT_DBG("%s", hdev->name);
4820
bf4c6325
GP
4821 rcu_read_lock();
4822
4823 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4824 struct hci_chan *tmp;
4825
4826 if (conn->type != type)
4827 continue;
4828
4829 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4830 continue;
4831
4832 conn_num++;
4833
8192edef 4834 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4835 struct sk_buff *skb;
4836
4837 if (skb_queue_empty(&tmp->data_q))
4838 continue;
4839
4840 skb = skb_peek(&tmp->data_q);
4841 if (skb->priority < cur_prio)
4842 continue;
4843
4844 if (skb->priority > cur_prio) {
4845 num = 0;
4846 min = ~0;
4847 cur_prio = skb->priority;
4848 }
4849
4850 num++;
4851
4852 if (conn->sent < min) {
4853 min = conn->sent;
4854 chan = tmp;
4855 }
4856 }
4857
4858 if (hci_conn_num(hdev, type) == conn_num)
4859 break;
4860 }
4861
bf4c6325
GP
4862 rcu_read_unlock();
4863
73d80deb
LAD
4864 if (!chan)
4865 return NULL;
4866
4867 switch (chan->conn->type) {
4868 case ACL_LINK:
4869 cnt = hdev->acl_cnt;
4870 break;
bd1eb66b
AE
4871 case AMP_LINK:
4872 cnt = hdev->block_cnt;
4873 break;
73d80deb
LAD
4874 case SCO_LINK:
4875 case ESCO_LINK:
4876 cnt = hdev->sco_cnt;
4877 break;
4878 case LE_LINK:
4879 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4880 break;
4881 default:
4882 cnt = 0;
4883 BT_ERR("Unknown link type");
4884 }
4885
4886 q = cnt / num;
4887 *quote = q ? q : 1;
4888 BT_DBG("chan %p quote %d", chan, *quote);
4889 return chan;
4890}
4891
02b20f0b
LAD
4892static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4893{
4894 struct hci_conn_hash *h = &hdev->conn_hash;
4895 struct hci_conn *conn;
4896 int num = 0;
4897
4898 BT_DBG("%s", hdev->name);
4899
bf4c6325
GP
4900 rcu_read_lock();
4901
4902 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4903 struct hci_chan *chan;
4904
4905 if (conn->type != type)
4906 continue;
4907
4908 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4909 continue;
4910
4911 num++;
4912
8192edef 4913 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4914 struct sk_buff *skb;
4915
4916 if (chan->sent) {
4917 chan->sent = 0;
4918 continue;
4919 }
4920
4921 if (skb_queue_empty(&chan->data_q))
4922 continue;
4923
4924 skb = skb_peek(&chan->data_q);
4925 if (skb->priority >= HCI_PRIO_MAX - 1)
4926 continue;
4927
4928 skb->priority = HCI_PRIO_MAX - 1;
4929
4930 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4931 skb->priority);
02b20f0b
LAD
4932 }
4933
4934 if (hci_conn_num(hdev, type) == num)
4935 break;
4936 }
bf4c6325
GP
4937
4938 rcu_read_unlock();
4939
02b20f0b
LAD
4940}
4941
b71d385a
AE
4942static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4943{
4944 /* Calculate count of blocks used by this packet */
4945 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4946}
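/* Worked example (not part of the original file): with a controller
 * block_len of 27 bytes, a 343-byte skb (339 bytes of payload after
 * the 4-byte ACL header) occupies DIV_ROUND_UP(339, 27) == 13 blocks.
 */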
4947
6039aa73 4948static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4949{
4a964404 4950 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4951 /* ACL tx timeout must be longer than the maximum
4952 * link supervision timeout (40.9 seconds) */
63d2bc1b 4953 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4954 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4955 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4956 }
63d2bc1b 4957}
1da177e4 4958
6039aa73 4959static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4960{
4961 unsigned int cnt = hdev->acl_cnt;
4962 struct hci_chan *chan;
4963 struct sk_buff *skb;
4964 int quote;
4965
4966 __check_timeout(hdev, cnt);
04837f64 4967
73d80deb 4968 while (hdev->acl_cnt &&
a8c5fb1a 4969 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4970 u32 priority = (skb_peek(&chan->data_q))->priority;
4971 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4972 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4973 skb->len, skb->priority);
73d80deb 4974
ec1cce24
LAD
4975 /* Stop if priority has changed */
4976 if (skb->priority < priority)
4977 break;
4978
4979 skb = skb_dequeue(&chan->data_q);
4980
73d80deb 4981 hci_conn_enter_active_mode(chan->conn,
04124681 4982 bt_cb(skb)->force_active);
04837f64 4983
57d17d70 4984 hci_send_frame(hdev, skb);
1da177e4
LT
4985 hdev->acl_last_tx = jiffies;
4986
4987 hdev->acl_cnt--;
73d80deb
LAD
4988 chan->sent++;
4989 chan->conn->sent++;
1da177e4
LT
4990 }
4991 }
02b20f0b
LAD
4992
4993 if (cnt != hdev->acl_cnt)
4994 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4995}
4996
6039aa73 4997static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4998{
63d2bc1b 4999 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5000 struct hci_chan *chan;
5001 struct sk_buff *skb;
5002 int quote;
bd1eb66b 5003 u8 type;
b71d385a 5004
63d2bc1b 5005 __check_timeout(hdev, cnt);
b71d385a 5006
bd1eb66b
AE
5007 BT_DBG("%s", hdev->name);
5008
5009 if (hdev->dev_type == HCI_AMP)
5010 type = AMP_LINK;
5011 else
5012 type = ACL_LINK;
5013
b71d385a 5014 while (hdev->block_cnt > 0 &&
bd1eb66b 5015 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5016 u32 priority = (skb_peek(&chan->data_q))->priority;
5017 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5018 int blocks;
5019
5020 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5021 skb->len, skb->priority);
b71d385a
AE
5022
5023 /* Stop if priority has changed */
5024 if (skb->priority < priority)
5025 break;
5026
5027 skb = skb_dequeue(&chan->data_q);
5028
5029 blocks = __get_blocks(hdev, skb);
5030 if (blocks > hdev->block_cnt)
5031 return;
5032
5033 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5034 bt_cb(skb)->force_active);
b71d385a 5035
57d17d70 5036 hci_send_frame(hdev, skb);
b71d385a
AE
5037 hdev->acl_last_tx = jiffies;
5038
5039 hdev->block_cnt -= blocks;
5040 quote -= blocks;
5041
5042 chan->sent += blocks;
5043 chan->conn->sent += blocks;
5044 }
5045 }
5046
5047 if (cnt != hdev->block_cnt)
bd1eb66b 5048 hci_prio_recalculate(hdev, type);
b71d385a
AE
5049}
5050
6039aa73 5051static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5052{
5053 BT_DBG("%s", hdev->name);
5054
bd1eb66b
AE
5055 /* No ACL link over BR/EDR controller */
5056 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5057 return;
5058
5059 /* No AMP link over AMP controller */
5060 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5061 return;
5062
5063 switch (hdev->flow_ctl_mode) {
5064 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5065 hci_sched_acl_pkt(hdev);
5066 break;
5067
5068 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5069 hci_sched_acl_blk(hdev);
5070 break;
5071 }
5072}
5073
1da177e4 5074/* Schedule SCO */
6039aa73 5075static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5076{
5077 struct hci_conn *conn;
5078 struct sk_buff *skb;
5079 int quote;
5080
5081 BT_DBG("%s", hdev->name);
5082
52087a79
LAD
5083 if (!hci_conn_num(hdev, SCO_LINK))
5084 return;
5085
1da177e4
LT
5086 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5087 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5088 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5089 hci_send_frame(hdev, skb);
1da177e4
LT
5090
5091 conn->sent++;
5092 if (conn->sent == ~0)
5093 conn->sent = 0;
5094 }
5095 }
5096}
5097
6039aa73 5098static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5099{
5100 struct hci_conn *conn;
5101 struct sk_buff *skb;
5102 int quote;
5103
5104 BT_DBG("%s", hdev->name);
5105
52087a79
LAD
5106 if (!hci_conn_num(hdev, ESCO_LINK))
5107 return;
5108
8fc9ced3
GP
5109 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5110 &quote))) {
b6a0dc82
MH
5111 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5112 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5113 hci_send_frame(hdev, skb);
b6a0dc82
MH
5114
5115 conn->sent++;
5116 if (conn->sent == ~0)
5117 conn->sent = 0;
5118 }
5119 }
5120}
5121
6039aa73 5122static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5123{
73d80deb 5124 struct hci_chan *chan;
6ed58ec5 5125 struct sk_buff *skb;
02b20f0b 5126 int quote, cnt, tmp;
6ed58ec5
VT
5127
5128 BT_DBG("%s", hdev->name);
5129
52087a79
LAD
5130 if (!hci_conn_num(hdev, LE_LINK))
5131 return;
5132
4a964404 5133 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5134 /* LE tx timeout must be longer than the maximum
5135 * link supervision timeout (40.9 seconds) */
bae1f5d9 5136 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5137 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5138 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5139 }
5140
5141 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5142 tmp = cnt;
73d80deb 5143 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5144 u32 priority = (skb_peek(&chan->data_q))->priority;
5145 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5146 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5147 skb->len, skb->priority);
6ed58ec5 5148
ec1cce24
LAD
5149 /* Stop if priority has changed */
5150 if (skb->priority < priority)
5151 break;
5152
5153 skb = skb_dequeue(&chan->data_q);
5154
57d17d70 5155 hci_send_frame(hdev, skb);
6ed58ec5
VT
5156 hdev->le_last_tx = jiffies;
5157
5158 cnt--;
73d80deb
LAD
5159 chan->sent++;
5160 chan->conn->sent++;
6ed58ec5
VT
5161 }
5162 }
73d80deb 5163
6ed58ec5
VT
5164 if (hdev->le_pkts)
5165 hdev->le_cnt = cnt;
5166 else
5167 hdev->acl_cnt = cnt;
02b20f0b
LAD
5168
5169 if (cnt != tmp)
5170 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5171}
5172
3eff45ea 5173static void hci_tx_work(struct work_struct *work)
1da177e4 5174{
3eff45ea 5175 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5176 struct sk_buff *skb;
5177
6ed58ec5 5178 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5179 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5180
52de599e
MH
5181 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5182 /* Schedule queues and send stuff to HCI driver */
5183 hci_sched_acl(hdev);
5184 hci_sched_sco(hdev);
5185 hci_sched_esco(hdev);
5186 hci_sched_le(hdev);
5187 }
6ed58ec5 5188
1da177e4
LT
5189 /* Send next queued raw (unknown type) packet */
5190 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5191 hci_send_frame(hdev, skb);
1da177e4
LT
5192}
5193
25985edc 5194/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5195
5196/* ACL data packet */
6039aa73 5197static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5198{
5199 struct hci_acl_hdr *hdr = (void *) skb->data;
5200 struct hci_conn *conn;
5201 __u16 handle, flags;
5202
5203 skb_pull(skb, HCI_ACL_HDR_SIZE);
5204
5205 handle = __le16_to_cpu(hdr->handle);
5206 flags = hci_flags(handle);
5207 handle = hci_handle(handle);
5208
f0e09510 5209 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5210 handle, flags);
1da177e4
LT
5211
5212 hdev->stat.acl_rx++;
5213
5214 hci_dev_lock(hdev);
5215 conn = hci_conn_hash_lookup_handle(hdev, handle);
5216 hci_dev_unlock(hdev);
8e87d142 5217
1da177e4 5218 if (conn) {
65983fc7 5219 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5220
1da177e4 5221 /* Send to upper protocol */
686ebf28
UF
5222 l2cap_recv_acldata(conn, skb, flags);
5223 return;
1da177e4 5224 } else {
8e87d142 5225 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5226 hdev->name, handle);
1da177e4
LT
5227 }
5228
5229 kfree_skb(skb);
5230}
5231
5232/* SCO data packet */
6039aa73 5233static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5234{
5235 struct hci_sco_hdr *hdr = (void *) skb->data;
5236 struct hci_conn *conn;
5237 __u16 handle;
5238
5239 skb_pull(skb, HCI_SCO_HDR_SIZE);
5240
5241 handle = __le16_to_cpu(hdr->handle);
5242
f0e09510 5243 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5244
5245 hdev->stat.sco_rx++;
5246
5247 hci_dev_lock(hdev);
5248 conn = hci_conn_hash_lookup_handle(hdev, handle);
5249 hci_dev_unlock(hdev);
5250
5251 if (conn) {
1da177e4 5252 /* Send to upper protocol */
686ebf28
UF
5253 sco_recv_scodata(conn, skb);
5254 return;
1da177e4 5255 } else {
8e87d142 5256 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5257 hdev->name, handle);
1da177e4
LT
5258 }
5259
5260 kfree_skb(skb);
5261}
5262
9238f36a
JH
5263static bool hci_req_is_complete(struct hci_dev *hdev)
5264{
5265 struct sk_buff *skb;
5266
5267 skb = skb_peek(&hdev->cmd_q);
5268 if (!skb)
5269 return true;
5270
5271 return bt_cb(skb)->req.start;
5272}
5273
42c6b129
JH
5274static void hci_resend_last(struct hci_dev *hdev)
5275{
5276 struct hci_command_hdr *sent;
5277 struct sk_buff *skb;
5278 u16 opcode;
5279
5280 if (!hdev->sent_cmd)
5281 return;
5282
5283 sent = (void *) hdev->sent_cmd->data;
5284 opcode = __le16_to_cpu(sent->opcode);
5285 if (opcode == HCI_OP_RESET)
5286 return;
5287
5288 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5289 if (!skb)
5290 return;
5291
5292 skb_queue_head(&hdev->cmd_q, skb);
5293 queue_work(hdev->workqueue, &hdev->cmd_work);
5294}
5295
9238f36a
JH
5296void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5297{
5298 hci_req_complete_t req_complete = NULL;
5299 struct sk_buff *skb;
5300 unsigned long flags;
5301
5302 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5303
42c6b129
JH
5304 /* If the completed command doesn't match the last one that was
5305 * sent, we need to do special handling of it.
9238f36a 5306 */
42c6b129
JH
5307 if (!hci_sent_cmd_data(hdev, opcode)) {
5308 /* Some CSR based controllers generate a spontaneous
5309 * reset complete event during init and any pending
5310 * command will never be completed. In such a case we
5311 * need to resend whatever was the last sent
5312 * command.
5313 */
5314 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5315 hci_resend_last(hdev);
5316
9238f36a 5317 return;
42c6b129 5318 }
9238f36a
JH
5319
5320 /* If the command succeeded and there's still more commands in
5321 * this request the request is not yet complete.
5322 */
5323 if (!status && !hci_req_is_complete(hdev))
5324 return;
5325
5326 /* If this was the last command in a request the complete
5327 * callback would be found in hdev->sent_cmd instead of the
5328 * command queue (hdev->cmd_q).
5329 */
5330 if (hdev->sent_cmd) {
5331 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5332
5333 if (req_complete) {
5334 /* We must set the complete callback to NULL to
5335 * avoid calling the callback more than once if
5336 * this function gets called again.
5337 */
5338 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5339
9238f36a 5340 goto call_complete;
53e21fbc 5341 }
9238f36a
JH
5342 }
5343
5344 /* Remove all pending commands belonging to this request */
5345 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5346 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5347 if (bt_cb(skb)->req.start) {
5348 __skb_queue_head(&hdev->cmd_q, skb);
5349 break;
5350 }
5351
5352 req_complete = bt_cb(skb)->req.complete;
5353 kfree_skb(skb);
5354 }
5355 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5356
5357call_complete:
5358 if (req_complete)
5359 req_complete(hdev, status);
5360}
5361
b78752cc 5362static void hci_rx_work(struct work_struct *work)
1da177e4 5363{
b78752cc 5364 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5365 struct sk_buff *skb;
5366
5367 BT_DBG("%s", hdev->name);
5368
1da177e4 5369 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5370 /* Send copy to monitor */
5371 hci_send_to_monitor(hdev, skb);
5372
1da177e4
LT
5373 if (atomic_read(&hdev->promisc)) {
5374 /* Send copy to the sockets */
470fe1b5 5375 hci_send_to_sock(hdev, skb);
1da177e4
LT
5376 }
5377
fee746b0 5378 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5379 kfree_skb(skb);
5380 continue;
5381 }
5382
5383 if (test_bit(HCI_INIT, &hdev->flags)) {
5384 /* Don't process data packets in this state. */
0d48d939 5385 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5386 case HCI_ACLDATA_PKT:
5387 case HCI_SCODATA_PKT:
5388 kfree_skb(skb);
5389 continue;
3ff50b79 5390 }
1da177e4
LT
5391 }
5392
5393 /* Process frame */
0d48d939 5394 switch (bt_cb(skb)->pkt_type) {
1da177e4 5395 case HCI_EVENT_PKT:
b78752cc 5396 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5397 hci_event_packet(hdev, skb);
5398 break;
5399
5400 case HCI_ACLDATA_PKT:
5401 BT_DBG("%s ACL data packet", hdev->name);
5402 hci_acldata_packet(hdev, skb);
5403 break;
5404
5405 case HCI_SCODATA_PKT:
5406 BT_DBG("%s SCO data packet", hdev->name);
5407 hci_scodata_packet(hdev, skb);
5408 break;
5409
5410 default:
5411 kfree_skb(skb);
5412 break;
5413 }
5414 }
1da177e4
LT
5415}
5416
c347b765 5417static void hci_cmd_work(struct work_struct *work)
1da177e4 5418{
c347b765 5419 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5420 struct sk_buff *skb;
5421
2104786b
AE
5422 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5423 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5424
1da177e4 5425 /* Send queued commands */
5a08ecce
AE
5426 if (atomic_read(&hdev->cmd_cnt)) {
5427 skb = skb_dequeue(&hdev->cmd_q);
5428 if (!skb)
5429 return;
5430
7585b97a 5431 kfree_skb(hdev->sent_cmd);
1da177e4 5432
a675d7f1 5433 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5434 if (hdev->sent_cmd) {
1da177e4 5435 atomic_dec(&hdev->cmd_cnt);
57d17d70 5436 hci_send_frame(hdev, skb);
7bdb8a5c 5437 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5438 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5439 else
65cc2b49
MH
5440 schedule_delayed_work(&hdev->cmd_timer,
5441 HCI_CMD_TIMEOUT);
1da177e4
LT
5442 } else {
5443 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5444 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5445 }
5446 }
5447}
b1efcc28
AG
5448
5449void hci_req_add_le_scan_disable(struct hci_request *req)
5450{
5451 struct hci_cp_le_set_scan_enable cp;
5452
5453 memset(&cp, 0, sizeof(cp));
5454 cp.enable = LE_SCAN_DISABLE;
5455 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5456}
a4790dbd 5457
8540f6c0
MH
5458static void add_to_white_list(struct hci_request *req,
5459 struct hci_conn_params *params)
5460{
5461 struct hci_cp_le_add_to_white_list cp;
5462
5463 cp.bdaddr_type = params->addr_type;
5464 bacpy(&cp.bdaddr, &params->addr);
5465
5466 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5467}
5468
5469static u8 update_white_list(struct hci_request *req)
5470{
5471 struct hci_dev *hdev = req->hdev;
5472 struct hci_conn_params *params;
5473 struct bdaddr_list *b;
5474 uint8_t white_list_entries = 0;
5475
5476 /* Go through the current white list programmed into the
5477 * controller one by one and check if that address is still
5478 * in the list of pending connections or list of devices to
5479 * report. If not present in either list, then queue the
5480 * command to remove it from the controller.
5481 */
5482 list_for_each_entry(b, &hdev->le_white_list, list) {
5483 struct hci_cp_le_del_from_white_list cp;
5484
5485 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5486 &b->bdaddr, b->bdaddr_type) ||
5487 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5488 &b->bdaddr, b->bdaddr_type)) {
5489 white_list_entries++;
5490 continue;
5491 }
5492
5493 cp.bdaddr_type = b->bdaddr_type;
5494 bacpy(&cp.bdaddr, &b->bdaddr);
5495
5496 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5497 sizeof(cp), &cp);
5498 }
5499
5500 /* Since all no longer valid white list entries have been
5501 * removed, walk through the list of pending connections
5502 * and ensure that any new device gets programmed into
5503 * the controller.
5504 *
5505 * If the list of devices is larger than the number of
5506 * available white list entries in the controller, then
5507 * just abort and return the filter policy value that does
5508 * not use the white list.
5509 */
5510 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5511 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5512 &params->addr, params->addr_type))
5513 continue;
5514
5515 if (white_list_entries >= hdev->le_white_list_size) {
5516 /* Select filter policy to accept all advertising */
5517 return 0x00;
5518 }
5519
66d8e837
MH
5520 if (hci_find_irk_by_addr(hdev, &params->addr,
5521 params->addr_type)) {
5522 /* The white list cannot be used with RPAs */
5523 return 0x00;
5524 }
5525
8540f6c0
MH
5526 white_list_entries++;
5527 add_to_white_list(req, params);
5528 }
5529
5530 /* After adding all new pending connections, walk through
5531 * the list of pending reports and also add these to the
5532 * white list if there is still space.
5533 */
5534 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5535 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5536 &params->addr, params->addr_type))
5537 continue;
5538
5539 if (white_list_entries >= hdev->le_white_list_size) {
5540 /* Select filter policy to accept all advertising */
5541 return 0x00;
5542 }
5543
66d8e837
MH
5544 if (hci_find_irk_by_addr(hdev, &params->addr,
5545 params->addr_type)) {
5547 /* The white list cannot be used with RPAs */
5547 return 0x00;
5548 }
5549
8540f6c0
MH
5550 white_list_entries++;
5551 add_to_white_list(req, params);
5552 }
5553
5554 /* Select filter policy to use white list */
5555 return 0x01;
5556}
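/* Note (not part of the original file): the value returned above is
 * copied into param_cp.filter_policy in hci_req_add_le_passive_scan()
 * below; 0x00 accepts all advertising packets while 0x01 restricts
 * reports to peers programmed into the controller's white list.
 */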
5557
8ef30fd3
AG
5558void hci_req_add_le_passive_scan(struct hci_request *req)
5559{
5560 struct hci_cp_le_set_scan_param param_cp;
5561 struct hci_cp_le_set_scan_enable enable_cp;
5562 struct hci_dev *hdev = req->hdev;
5563 u8 own_addr_type;
8540f6c0 5564 u8 filter_policy;
8ef30fd3 5565
6ab535a7
MH
5566 /* Set require_privacy to false since no SCAN_REQ are sent
5567 * during passive scanning. Not using an unresolvable address
5568 * here is important so that peer devices using direct
5569 * advertising with our address will be correctly reported
5570 * by the controller.
8ef30fd3 5571 */
6ab535a7 5572 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5573 return;
5574
8540f6c0
MH
5575 /* Adding or removing entries from the white list must
5576 * happen before enabling scanning. The controller does
5577 * not allow white list modification while scanning.
5578 */
5579 filter_policy = update_white_list(req);
5580
8ef30fd3
AG
5581 memset(&param_cp, 0, sizeof(param_cp));
5582 param_cp.type = LE_SCAN_PASSIVE;
5583 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5584 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5585 param_cp.own_address_type = own_addr_type;
8540f6c0 5586 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5587 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5588 &param_cp);
5589
5590 memset(&enable_cp, 0, sizeof(enable_cp));
5591 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5592 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5593 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5594 &enable_cp);
5595}
5596
a4790dbd
AG
5597static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5598{
5599 if (status)
5600 BT_DBG("HCI request failed to update background scanning: "
5601 "status 0x%2.2x", status);
5602}
5603
5604/* This function controls the background scanning based on hdev->pend_le_conns
5605 * list. If there are pending LE connections we start the background scanning,
5606 * otherwise we stop it.
5607 *
5608 * This function requires the caller to hold hdev->lock.
5609 */
5610void hci_update_background_scan(struct hci_dev *hdev)
5611{
a4790dbd
AG
5612 struct hci_request req;
5613 struct hci_conn *conn;
5614 int err;
5615
c20c02d5
MH
5616 if (!test_bit(HCI_UP, &hdev->flags) ||
5617 test_bit(HCI_INIT, &hdev->flags) ||
5618 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5619 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5620 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5621 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5622 return;
5623
a70f4b5f
JH
5624 /* No point in doing scanning if LE support hasn't been enabled */
5625 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5626 return;
5627
ae23ada4
JH
5628 /* If discovery is active don't interfere with it */
5629 if (hdev->discovery.state != DISCOVERY_STOPPED)
5630 return;
5631
a4790dbd
AG
5632 hci_req_init(&req, hdev);
5633
d1d588c1 5634 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5635 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5636 /* If there are no pending LE connections or devices
5637 * to be scanned for, we should stop the background
5638 * scanning.
a4790dbd
AG
5639 */
5640
5641 /* If controller is not scanning we are done. */
5642 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5643 return;
5644
5645 hci_req_add_le_scan_disable(&req);
5646
5647 BT_DBG("%s stopping background scanning", hdev->name);
5648 } else {
a4790dbd
AG
5649 /* If there is at least one pending LE connection, we should
5650 * keep the background scan running.
5651 */
5652
a4790dbd
AG
5653 /* If controller is connecting, we should not start scanning
5654 * since some controllers are not able to scan and connect at
5655 * the same time.
5656 */
5657 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5658 if (conn)
5659 return;
5660
4340a124
AG
5661 /* If controller is currently scanning, we stop it to ensure we
5662 * don't miss any advertising (due to the duplicates filter).
5663 */
5664 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5665 hci_req_add_le_scan_disable(&req);
5666
8ef30fd3 5667 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5668
5669 BT_DBG("%s starting background scanning", hdev->name);
5670 }
5671
5672 err = hci_req_run(&req, update_background_scan_complete);
5673 if (err)
5674 BT_ERR("Failed to run HCI request: err %d", err);
5675}
432df05e 5676
22f433dc
JH
5677static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5678{
5679 struct bdaddr_list *b;
5680
5681 list_for_each_entry(b, &hdev->whitelist, list) {
5682 struct hci_conn *conn;
5683
5684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5685 if (!conn)
5686 return true;
5687
5688 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5689 return true;
5690 }
5691
5692 return false;
5693}
5694
432df05e
JH
5695void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5696{
5697 u8 scan;
5698
5699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5700 return;
5701
5702 if (!hdev_is_powered(hdev))
5703 return;
5704
5705 if (mgmt_powering_down(hdev))
5706 return;
5707
5708 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5709 disconnected_whitelist_entries(hdev))
432df05e
JH
5710 scan = SCAN_PAGE;
5711 else
5712 scan = SCAN_DISABLED;
5713
5714 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5715 return;
5716
5717 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5718 scan |= SCAN_INQUIRY;
5719
5720 if (req)
5721 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5722 else
5723 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5724}