Bluetooth: Add SMP L2CAP channel skeleton
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
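
/* A synchronous request marks req_status as HCI_REQ_PEND and parks the
 * caller on hdev->req_wait_q; the completion callback moves it to
 * HCI_REQ_DONE (or HCI_REQ_CANCELED) and wakes the waiter. See
 * __hci_req_sync() further down.
 */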

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
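
/* Illustrative usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is registered as hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE; writing 'N' resets the
 * controller to leave Device Under Test mode.
 */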

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

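/* The %pMR specifier used by the show functions below is the printk
 * extension for a 6-byte Bluetooth device address (bdaddr_t); it prints
 * the bytes in reversed order, matching the usual colon-separated
 * presentation of Bluetooth addresses.
 */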
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
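
/* DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write plumbing for a
 * single-value debugfs file: reads invoke the getter and format the u64
 * with the given printf-style string (here "0x%4.4llx\n"), writes parse
 * the input and invoke the setter. Passing NULL for the setter, as for
 * voice_setting above, makes the attribute read-only.
 */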

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
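
/* Illustrative call (a sketch, not code from this file): with the request
 * lock held, a caller could read the controller address synchronously:
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_bd_addr *)skb->data;
 *	...
 *	kfree_skb(skb);
 *
 * The returned skb carries the Command Complete return parameters and is
 * owned by the caller.
 */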

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
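
/* Illustrative caller (a sketch): request builders such as hci_scan_req()
 * below queue commands via hci_req_add(), and ioctl-style paths run them
 * synchronously, e.g.
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * The opt argument is passed through unmodified to the builder callback.
 */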

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

1419{
1420 if (lmp_ext_inq_capable(hdev))
1421 return 0x02;
1422
1423 if (lmp_inq_rssi_capable(hdev))
1424 return 0x01;
1425
1426 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427 hdev->lmp_subver == 0x0757)
1428 return 0x01;
1429
1430 if (hdev->manufacturer == 15) {
1431 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432 return 0x01;
1433 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434 return 0x01;
1435 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436 return 0x01;
1437 }
1438
1439 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440 hdev->lmp_subver == 0x1805)
1441 return 0x01;
1442
1443 return 0x00;
1444}
1445
42c6b129 1446static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1447{
1448 u8 mode;
1449
42c6b129 1450 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1451
42c6b129 1452 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1453}
1454
42c6b129 1455static void hci_setup_event_mask(struct hci_request *req)
2177bab5 1456{
42c6b129
JH
1457 struct hci_dev *hdev = req->hdev;
1458
2177bab5
JH
1459 /* The second byte is 0xff instead of 0x9f (two reserved bits
1460 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461 * command otherwise.
1462 */
1463 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1464
1465 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466 * any event mask for pre 1.2 devices.
1467 */
1468 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469 return;
1470
1471 if (lmp_bredr_capable(hdev)) {
1472 events[4] |= 0x01; /* Flow Specification Complete */
1473 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475 events[5] |= 0x08; /* Synchronous Connection Complete */
1476 events[5] |= 0x10; /* Synchronous Connection Changed */
c7882cbd
MH
1477 } else {
1478 /* Use a different default for LE-only devices */
1479 memset(events, 0, sizeof(events));
1480 events[0] |= 0x10; /* Disconnection Complete */
c7882cbd
MH
1481 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482 events[1] |= 0x20; /* Command Complete */
1483 events[1] |= 0x40; /* Command Status */
1484 events[1] |= 0x80; /* Hardware Error */
1485 events[2] |= 0x04; /* Number of Completed Packets */
1486 events[3] |= 0x02; /* Data Buffer Overflow */
0da71f1b
MH
1487
1488 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 events[0] |= 0x80; /* Encryption Change */
1490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491 }
2177bab5
JH
1492 }
1493
1494 if (lmp_inq_rssi_capable(hdev))
1495 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496
1497 if (lmp_sniffsubr_capable(hdev))
1498 events[5] |= 0x20; /* Sniff Subrating */
1499
1500 if (lmp_pause_enc_capable(hdev))
1501 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502
1503 if (lmp_ext_inq_capable(hdev))
1504 events[5] |= 0x40; /* Extended Inquiry Result */
1505
1506 if (lmp_no_flush_capable(hdev))
1507 events[7] |= 0x01; /* Enhanced Flush Complete */
1508
1509 if (lmp_lsto_capable(hdev))
1510 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511
1512 if (lmp_ssp_capable(hdev)) {
1513 events[6] |= 0x01; /* IO Capability Request */
1514 events[6] |= 0x02; /* IO Capability Response */
1515 events[6] |= 0x04; /* User Confirmation Request */
1516 events[6] |= 0x08; /* User Passkey Request */
1517 events[6] |= 0x10; /* Remote OOB Data Request */
1518 events[6] |= 0x20; /* Simple Pairing Complete */
1519 events[7] |= 0x04; /* User Passkey Notification */
1520 events[7] |= 0x08; /* Keypress Notification */
1521 events[7] |= 0x10; /* Remote Host Supported
1522 * Features Notification
1523 */
1524 }
1525
1526 if (lmp_le_capable(hdev))
1527 events[7] |= 0x20; /* LE Meta-Event */
1528
42c6b129 1529 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
1530}
1531
42c6b129 1532static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1533{
42c6b129
JH
1534 struct hci_dev *hdev = req->hdev;
1535
2177bab5 1536 if (lmp_bredr_capable(hdev))
42c6b129 1537 bredr_setup(req);
56f87901
JH
1538 else
1539 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1540
1541 if (lmp_le_capable(hdev))
42c6b129 1542 le_setup(req);
2177bab5 1543
3f8e2d75
JH
1544 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545 * local supported commands HCI command.
1546 */
1547 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1548 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1549
1550 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1551 /* When SSP is available, then the host features page
1552 * should also be available as well. However some
1553 * controllers list the max_page as 0 as long as SSP
1554 * has not been enabled. To achieve proper debugging
1555 * output, force the minimum max_page to 1 at least.
1556 */
1557 hdev->max_page = 0x01;
1558
2177bab5
JH
1559 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560 u8 mode = 0x01;
42c6b129
JH
1561 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562 sizeof(mode), &mode);
2177bab5
JH
1563 } else {
1564 struct hci_cp_write_eir cp;
1565
1566 memset(hdev->eir, 0, sizeof(hdev->eir));
1567 memset(&cp, 0, sizeof(cp));
1568
42c6b129 1569 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1570 }
1571 }
1572
1573 if (lmp_inq_rssi_capable(hdev))
42c6b129 1574 hci_setup_inquiry_mode(req);
2177bab5
JH
1575
1576 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1577 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1578
1579 if (lmp_ext_feat_capable(hdev)) {
1580 struct hci_cp_read_local_ext_features cp;
1581
1582 cp.page = 0x01;
42c6b129
JH
1583 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584 sizeof(cp), &cp);
2177bab5
JH
1585 }
1586
1587 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588 u8 enable = 1;
42c6b129
JH
1589 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590 &enable);
2177bab5
JH
1591 }
1592}
1593
42c6b129 1594static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1595{
42c6b129 1596 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1597 struct hci_cp_write_def_link_policy cp;
1598 u16 link_policy = 0;
1599
1600 if (lmp_rswitch_capable(hdev))
1601 link_policy |= HCI_LP_RSWITCH;
1602 if (lmp_hold_capable(hdev))
1603 link_policy |= HCI_LP_HOLD;
1604 if (lmp_sniff_capable(hdev))
1605 link_policy |= HCI_LP_SNIFF;
1606 if (lmp_park_capable(hdev))
1607 link_policy |= HCI_LP_PARK;
1608
1609 cp.policy = cpu_to_le16(link_policy);
42c6b129 1610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1611}
1612
42c6b129 1613static void hci_set_le_support(struct hci_request *req)
2177bab5 1614{
42c6b129 1615 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1616 struct hci_cp_write_le_host_supported cp;
1617
c73eee91
JH
1618 /* LE-only devices do not support explicit enablement */
1619 if (!lmp_bredr_capable(hdev))
1620 return;
1621
2177bab5
JH
1622 memset(&cp, 0, sizeof(cp));
1623
1624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625 cp.le = 0x01;
32226e4f 1626 cp.simul = 0x00;
2177bab5
JH
1627 }
1628
1629 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1630 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631 &cp);
2177bab5
JH
1632}
1633
d62e6d67
JH
1634static void hci_set_event_mask_page_2(struct hci_request *req)
1635{
1636 struct hci_dev *hdev = req->hdev;
1637 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638
1639 /* If Connectionless Slave Broadcast master role is supported
1640 * enable all necessary events for it.
1641 */
53b834d2 1642 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1643 events[1] |= 0x40; /* Triggered Clock Capture */
1644 events[1] |= 0x80; /* Synchronization Train Complete */
1645 events[2] |= 0x10; /* Slave Page Response Timeout */
1646 events[2] |= 0x20; /* CSB Channel Map Change */
1647 }
1648
1649 /* If Connectionless Slave Broadcast slave role is supported
1650 * enable all necessary events for it.
1651 */
53b834d2 1652 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1653 events[2] |= 0x01; /* Synchronization Train Received */
1654 events[2] |= 0x02; /* CSB Receive */
1655 events[2] |= 0x04; /* CSB Timeout */
1656 events[2] |= 0x08; /* Truncated Page Complete */
1657 }
1658
40c59fcb 1659 /* Enable Authenticated Payload Timeout Expired event if supported */
cd7ca0ec 1660 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
40c59fcb
MH
1661 events[2] |= 0x80;
1662
d62e6d67
JH
1663 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664}
1665
42c6b129 1666static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1667{
42c6b129 1668 struct hci_dev *hdev = req->hdev;
d2c5d77f 1669 u8 p;
42c6b129 1670
0da71f1b
MH
1671 hci_setup_event_mask(req);
1672
b8f4e068
GP
1673 /* Some Broadcom based Bluetooth controllers do not support the
1674 * Delete Stored Link Key command. They are clearly indicating its
1675 * absence in the bit mask of supported commands.
1676 *
1677 * Check the supported commands and only if the the command is marked
1678 * as supported send it. If not supported assume that the controller
1679 * does not have actual support for stored link keys which makes this
1680 * command redundant anyway.
f9f462fa
MH
1681 *
1682 * Some controllers indicate that they support handling deleting
1683 * stored link keys, but they don't. The quirk lets a driver
1684 * just disable this command.
637b4cae 1685 */
f9f462fa
MH
1686 if (hdev->commands[6] & 0x80 &&
1687 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1688 struct hci_cp_delete_stored_link_key cp;
1689
1690 bacpy(&cp.bdaddr, BDADDR_ANY);
1691 cp.delete_all = 0x01;
1692 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693 sizeof(cp), &cp);
1694 }
1695
2177bab5 1696 if (hdev->commands[5] & 0x10)
42c6b129 1697 hci_setup_link_policy(req);
2177bab5 1698
9193c6e8
AG
1699 if (lmp_le_capable(hdev)) {
1700 u8 events[8];
1701
1702 memset(events, 0, sizeof(events));
4d6c705b
MH
1703 events[0] = 0x0f;
1704
1705 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 events[0] |= 0x10; /* LE Long Term Key Request */
662bc2e6
AG
1707
1708 /* If controller supports the Connection Parameters Request
1709 * Link Layer Procedure, enable the corresponding event.
1710 */
1711 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 events[0] |= 0x20; /* LE Remote Connection
1713 * Parameter Request
1714 */
1715
9193c6e8
AG
1716 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717 events);
1718
15a49cca
MH
1719 if (hdev->commands[25] & 0x40) {
1720 /* Read LE Advertising Channel TX Power */
1721 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722 }
1723
42c6b129 1724 hci_set_le_support(req);
9193c6e8 1725 }
d2c5d77f
JH
1726
1727 /* Read features beyond page 1 if available */
1728 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729 struct hci_cp_read_local_ext_features cp;
1730
1731 cp.page = p;
1732 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733 sizeof(cp), &cp);
1734 }
2177bab5
JH
1735}
1736
5d4e7e8d
JH
1737static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738{
1739 struct hci_dev *hdev = req->hdev;
1740
d62e6d67
JH
1741 /* Set event mask page 2 if the HCI command for it is supported */
1742 if (hdev->commands[22] & 0x04)
1743 hci_set_event_mask_page_2(req);
1744
109e3191
MH
1745 /* Read local codec list if the HCI command is supported */
1746 if (hdev->commands[29] & 0x20)
1747 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
f4fe73ed
MH
1749 /* Get MWS transport configuration if the HCI command is supported */
1750 if (hdev->commands[30] & 0x08)
1751 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
5d4e7e8d 1753 /* Check for Synchronization Train support */
53b834d2 1754 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1755 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1756
1757 /* Enable Secure Connections if supported and configured */
5afeac14 1758 if ((lmp_sc_capable(hdev) ||
111902f7 1759 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
a6d0d690
MH
1760 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761 u8 support = 0x01;
1762 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763 sizeof(support), &support);
1764 }
5d4e7e8d
JH
1765}
1766
2177bab5
JH
1767static int __hci_init(struct hci_dev *hdev)
1768{
1769 int err;
1770
1771 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
4b4148e9
MH
1775 /* The Device Under Test (DUT) mode is special and available for
1776 * all controller types. So just create it early on.
1777 */
1778 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780 &dut_mode_fops);
1781 }
1782
2177bab5
JH
1783 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784 * BR/EDR/LE type controllers. AMP controllers only need the
1785 * first stage init.
1786 */
1787 if (hdev->dev_type != HCI_BREDR)
1788 return 0;
1789
1790 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1793
5d4e7e8d
JH
1794 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
baf27f6e
MH
1798 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799 if (err < 0)
1800 return err;
1801
1802 /* Only create debugfs entries during the initial setup
1803 * phase and not every time the controller gets powered on.
1804 */
1805 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806 return 0;
1807
dfb826a8
MH
1808 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809 &features_fops);
ceeb3bc0
MH
1810 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811 &hdev->manufacturer);
1812 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1814 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815 &blacklist_fops);
6659358e
JH
1816 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817 &whitelist_fops);
47219839
MH
1818 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
31ad1691
AK
1820 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821 &conn_info_min_age_fops);
1822 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823 &conn_info_max_age_fops);
1824
baf27f6e
MH
1825 if (lmp_bredr_capable(hdev)) {
1826 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827 hdev, &inquiry_cache_fops);
02d08d15
MH
1828 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829 hdev, &link_keys_fops);
babdbb3c
MH
1830 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831 hdev, &dev_class_fops);
041000b9
MH
1832 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833 hdev, &voice_setting_fops);
baf27f6e
MH
1834 }
1835
06f5b778 1836 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1837 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838 hdev, &auto_accept_delay_fops);
5afeac14
MH
1839 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840 hdev, &force_sc_support_fops);
134c2a89
MH
1841 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842 hdev, &sc_only_mode_fops);
06f5b778 1843 }
ebd1e33b 1844
2bfa3531
MH
1845 if (lmp_sniff_capable(hdev)) {
1846 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847 hdev, &idle_timeout_fops);
1848 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849 hdev, &sniff_min_interval_fops);
1850 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851 hdev, &sniff_max_interval_fops);
1852 }
1853
d0f729b8 1854 if (lmp_le_capable(hdev)) {
ac345813
MH
1855 debugfs_create_file("identity", 0400, hdev->debugfs,
1856 hdev, &identity_fops);
1857 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1859 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860 hdev, &random_address_fops);
b32bba6c
MH
1861 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862 hdev, &static_address_fops);
1863
1864 /* For controllers with a public address, provide a debug
1865 * option to force the usage of the configured static
1866 * address. By default the public address is used.
1867 */
1868 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869 debugfs_create_file("force_static_address", 0644,
1870 hdev->debugfs, hdev,
1871 &force_static_address_fops);
1872
d0f729b8
MH
1873 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874 &hdev->le_white_list_size);
d2ab0ac1
MH
1875 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876 &white_list_fops);
3698d704
MH
1877 debugfs_create_file("identity_resolving_keys", 0400,
1878 hdev->debugfs, hdev,
1879 &identity_resolving_keys_fops);
8f8625cd
MH
1880 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881 hdev, &long_term_keys_fops);
4e70c7e7
MH
1882 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883 hdev, &conn_min_interval_fops);
1884 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885 hdev, &conn_max_interval_fops);
816a93d1
MH
1886 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 hdev, &conn_latency_fops);
f1649577
MH
1888 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 hdev, &supervision_timeout_fops);
3f959d46
MH
1890 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891 hdev, &adv_channel_map_fops);
729a1051
GL
1892 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893 hdev, &adv_min_interval_fops);
1894 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895 hdev, &adv_max_interval_fops);
0b3c7d37
MH
1896 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897 &device_list_fops);
b9a7a61e
LR
1898 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899 hdev->debugfs,
1900 &hdev->discov_interleaved_timeout);
54506918 1901
711eafe3 1902 smp_register(hdev);
d0f729b8 1903 }
e7b8fc92 1904
baf27f6e 1905 return 0;
2177bab5
JH
1906}
1907
0ebca7d6
MH
1908static void hci_init0_req(struct hci_request *req, unsigned long opt)
1909{
1910 struct hci_dev *hdev = req->hdev;
1911
1912 BT_DBG("%s %ld", hdev->name, opt);
1913
1914 /* Reset */
1915 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1916 hci_reset_req(req, 0);
1917
1918 /* Read Local Version */
1919 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1920
1921 /* Read BD Address */
1922 if (hdev->set_bdaddr)
1923 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1924}
1925
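/* Minimal init sequence for a not yet configured controller: an
 * optional HCI reset, Read Local Version and, if the driver is able
 * to change the address, Read BD Address. Raw devices are skipped
 * entirely.
 */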
1926static int __hci_unconf_init(struct hci_dev *hdev)
1927{
1928 int err;
1929
cc78b44b
MH
1930 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1931 return 0;
1932
0ebca7d6
MH
1933 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1934 if (err < 0)
1935 return err;
1936
1937 return 0;
1938}
1939
42c6b129 1940static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1941{
1942 __u8 scan = opt;
1943
42c6b129 1944 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1945
1946 /* Inquiry and Page scans */
42c6b129 1947 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1948}
1949
42c6b129 1950static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1951{
1952 __u8 auth = opt;
1953
42c6b129 1954 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1955
1956 /* Authentication */
42c6b129 1957 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1958}
1959
42c6b129 1960static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1961{
1962 __u8 encrypt = opt;
1963
42c6b129 1964 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1965
e4e8e37c 1966 /* Encryption */
42c6b129 1967 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1968}
1969
42c6b129 1970static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1971{
1972 __le16 policy = cpu_to_le16(opt);
1973
42c6b129 1974 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1975
1976 /* Default link policy */
42c6b129 1977 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1978}
1979
8e87d142 1980/* Get HCI device by index.
1da177e4
LT
1981 * Device is held on return. */
1982struct hci_dev *hci_dev_get(int index)
1983{
8035ded4 1984 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1985
1986 BT_DBG("%d", index);
1987
1988 if (index < 0)
1989 return NULL;
1990
1991 read_lock(&hci_dev_list_lock);
8035ded4 1992 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1993 if (d->id == index) {
1994 hdev = hci_dev_hold(d);
1995 break;
1996 }
1997 }
1998 read_unlock(&hci_dev_list_lock);
1999 return hdev;
2000}
1da177e4
LT
2001
2002/* ---- Inquiry support ---- */
ff9ef578 2003
30dc78e1
JH
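/* A discovery procedure counts as active while remote devices are
 * being found or their names resolved; every other state is idle.
 */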
2004bool hci_discovery_active(struct hci_dev *hdev)
2005{
2006 struct discovery_state *discov = &hdev->discovery;
2007
6fbe195d 2008 switch (discov->state) {
343f935b 2009 case DISCOVERY_FINDING:
6fbe195d 2010 case DISCOVERY_RESOLVING:
30dc78e1
JH
2011 return true;
2012
6fbe195d
AG
2013 default:
2014 return false;
2015 }
30dc78e1
JH
2016}
2017
ff9ef578
JH
2018void hci_discovery_set_state(struct hci_dev *hdev, int state)
2019{
bb3e0a33
JH
2020 int old_state = hdev->discovery.state;
2021
ff9ef578
JH
2022 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2023
bb3e0a33 2024 if (old_state == state)
ff9ef578
JH
2025 return;
2026
bb3e0a33
JH
2027 hdev->discovery.state = state;
2028
ff9ef578
JH
2029 switch (state) {
2030 case DISCOVERY_STOPPED:
c54c3860
AG
2031 hci_update_background_scan(hdev);
2032
bb3e0a33 2033 if (old_state != DISCOVERY_STARTING)
7b99b659 2034 mgmt_discovering(hdev, 0);
ff9ef578
JH
2035 break;
2036 case DISCOVERY_STARTING:
2037 break;
343f935b 2038 case DISCOVERY_FINDING:
ff9ef578
JH
2039 mgmt_discovering(hdev, 1);
2040 break;
30dc78e1
JH
2041 case DISCOVERY_RESOLVING:
2042 break;
ff9ef578
JH
2043 case DISCOVERY_STOPPING:
2044 break;
2045 }
ff9ef578
JH
2046}
2047
1f9b9a5d 2048void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2049{
30883512 2050 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2051 struct inquiry_entry *p, *n;
1da177e4 2052
561aafbc
JH
2053 list_for_each_entry_safe(p, n, &cache->all, all) {
2054 list_del(&p->all);
b57c1a56 2055 kfree(p);
1da177e4 2056 }
561aafbc
JH
2057
2058 INIT_LIST_HEAD(&cache->unknown);
2059 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2060}
2061
a8c5fb1a
GP
2062struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2063 bdaddr_t *bdaddr)
1da177e4 2064{
30883512 2065 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2066 struct inquiry_entry *e;
2067
6ed93dc6 2068 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2069
561aafbc
JH
2070 list_for_each_entry(e, &cache->all, all) {
2071 if (!bacmp(&e->data.bdaddr, bdaddr))
2072 return e;
2073 }
2074
2075 return NULL;
2076}
2077
2078struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2079 bdaddr_t *bdaddr)
561aafbc 2080{
30883512 2081 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2082 struct inquiry_entry *e;
2083
6ed93dc6 2084 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2085
2086 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2087 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2088 return e;
2089 }
2090
2091 return NULL;
1da177e4
LT
2092}
2093
30dc78e1 2094struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2095 bdaddr_t *bdaddr,
2096 int state)
30dc78e1
JH
2097{
2098 struct discovery_state *cache = &hdev->discovery;
2099 struct inquiry_entry *e;
2100
6ed93dc6 2101 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2102
2103 list_for_each_entry(e, &cache->resolve, list) {
2104 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2105 return e;
2106 if (!bacmp(&e->data.bdaddr, bdaddr))
2107 return e;
2108 }
2109
2110 return NULL;
2111}
2112
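/* Re-insert @ie into the resolve list: entries with a pending name
 * request stay in front, the rest are ordered by signal strength
 * with the strongest (smallest abs(rssi)) first.
 */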
a3d4e20a 2113void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2114 struct inquiry_entry *ie)
a3d4e20a
JH
2115{
2116 struct discovery_state *cache = &hdev->discovery;
2117 struct list_head *pos = &cache->resolve;
2118 struct inquiry_entry *p;
2119
2120 list_del(&ie->list);
2121
2122 list_for_each_entry(p, &cache->resolve, list) {
2123 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2124 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2125 break;
2126 pos = &p->list;
2127 }
2128
2129 list_add(&ie->list, pos);
2130}
2131
af58925c
MH
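/* Add a fresh inquiry result to the discovery cache or refresh an
 * existing entry. The returned MGMT_DEV_FOUND_* flags tell the caller
 * whether the device uses legacy pairing and whether userspace needs
 * to confirm the remote name.
 */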
2132u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2133 bool name_known)
1da177e4 2134{
30883512 2135 struct discovery_state *cache = &hdev->discovery;
70f23020 2136 struct inquiry_entry *ie;
af58925c 2137 u32 flags = 0;
1da177e4 2138
6ed93dc6 2139 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2140
2b2fec4d
SJ
2141 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2142
af58925c
MH
2143 if (!data->ssp_mode)
2144 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2145
70f23020 2146 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2147 if (ie) {
af58925c
MH
2148 if (!ie->data.ssp_mode)
2149 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2150
a3d4e20a 2151 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2152 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2153 ie->data.rssi = data->rssi;
2154 hci_inquiry_cache_update_resolve(hdev, ie);
2155 }
2156
561aafbc 2157 goto update;
a3d4e20a 2158 }
561aafbc
JH
2159
2160 /* Entry not in the cache. Add new one. */
27f70f3e 2161 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2162 if (!ie) {
2163 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2164 goto done;
2165 }
561aafbc
JH
2166
2167 list_add(&ie->all, &cache->all);
2168
2169 if (name_known) {
2170 ie->name_state = NAME_KNOWN;
2171 } else {
2172 ie->name_state = NAME_NOT_KNOWN;
2173 list_add(&ie->list, &cache->unknown);
2174 }
70f23020 2175
561aafbc
JH
2176update:
2177 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2178 ie->name_state != NAME_PENDING) {
561aafbc
JH
2179 ie->name_state = NAME_KNOWN;
2180 list_del(&ie->list);
1da177e4
LT
2181 }
2182
70f23020
AE
2183 memcpy(&ie->data, data, sizeof(*data));
2184 ie->timestamp = jiffies;
1da177e4 2185 cache->timestamp = jiffies;
3175405b
JH
2186
2187 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2188 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2189
af58925c
MH
2190done:
2191 return flags;
1da177e4
LT
2192}
2193
2194static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2195{
30883512 2196 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2197 struct inquiry_info *info = (struct inquiry_info *) buf;
2198 struct inquiry_entry *e;
2199 int copied = 0;
2200
561aafbc 2201 list_for_each_entry(e, &cache->all, all) {
1da177e4 2202 struct inquiry_data *data = &e->data;
b57c1a56
JH
2203
2204 if (copied >= num)
2205 break;
2206
1da177e4
LT
2207 bacpy(&info->bdaddr, &data->bdaddr);
2208 info->pscan_rep_mode = data->pscan_rep_mode;
2209 info->pscan_period_mode = data->pscan_period_mode;
2210 info->pscan_mode = data->pscan_mode;
2211 memcpy(info->dev_class, data->dev_class, 3);
2212 info->clock_offset = data->clock_offset;
b57c1a56 2213
1da177e4 2214 info++;
b57c1a56 2215 copied++;
1da177e4
LT
2216 }
2217
2218 BT_DBG("cache %p, copied %d", cache, copied);
2219 return copied;
2220}
2221
42c6b129 2222static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2223{
2224 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2225 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2226 struct hci_cp_inquiry cp;
2227
2228 BT_DBG("%s", hdev->name);
2229
2230 if (test_bit(HCI_INQUIRY, &hdev->flags))
2231 return;
2232
2233 /* Start Inquiry */
2234 memcpy(&cp.lap, &ir->lap, 3);
2235 cp.length = ir->length;
2236 cp.num_rsp = ir->num_rsp;
42c6b129 2237 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2238}
2239
2240int hci_inquiry(void __user *arg)
2241{
2242 __u8 __user *ptr = arg;
2243 struct hci_inquiry_req ir;
2244 struct hci_dev *hdev;
2245 int err = 0, do_inquiry = 0, max_rsp;
2246 long timeo;
2247 __u8 *buf;
2248
2249 if (copy_from_user(&ir, ptr, sizeof(ir)))
2250 return -EFAULT;
2251
5a08ecce
AE
2252 hdev = hci_dev_get(ir.dev_id);
2253 if (!hdev)
1da177e4
LT
2254 return -ENODEV;
2255
0736cfa8
MH
2256 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 err = -EBUSY;
2258 goto done;
2259 }
2260
4a964404 2261 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2262 err = -EOPNOTSUPP;
2263 goto done;
2264 }
2265
5b69bef5
MH
2266 if (hdev->dev_type != HCI_BREDR) {
2267 err = -EOPNOTSUPP;
2268 goto done;
2269 }
2270
56f87901
JH
2271 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2272 err = -EOPNOTSUPP;
2273 goto done;
2274 }
2275
09fd0de5 2276 hci_dev_lock(hdev);
8e87d142 2277 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2278 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2279 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2280 do_inquiry = 1;
2281 }
09fd0de5 2282 hci_dev_unlock(hdev);
1da177e4 2283
04837f64 2284 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2285
2286 if (do_inquiry) {
01178cd4
JH
2287 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2288 timeo);
70f23020
AE
2289 if (err < 0)
2290 goto done;
3e13fa1e
AG
2291
2292 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2293 * cleared). If it is interrupted by a signal, return -EINTR.
2294 */
74316201 2295 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2296 TASK_INTERRUPTIBLE))
2297 return -EINTR;
70f23020 2298 }
1da177e4 2299
8fc9ced3
GP
2300 /* For an unlimited number of responses we will use a buffer with
2301 * 255 entries
2302 */
1da177e4
LT
2303 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2304
2305 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2306 * copy it to user space.
2307 */
01df8c31 2308 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2309 if (!buf) {
1da177e4
LT
2310 err = -ENOMEM;
2311 goto done;
2312 }
2313
09fd0de5 2314 hci_dev_lock(hdev);
1da177e4 2315 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2316 hci_dev_unlock(hdev);
1da177e4
LT
2317
2318 BT_DBG("num_rsp %d", ir.num_rsp);
2319
2320 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2321 ptr += sizeof(ir);
2322 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2323 ir.num_rsp))
1da177e4 2324 err = -EFAULT;
8e87d142 2325 } else
1da177e4
LT
2326 err = -EFAULT;
2327
2328 kfree(buf);
2329
2330done:
2331 hci_dev_put(hdev);
2332 return err;
2333}
2334
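/* Bring the controller up: run the driver setup and HCI init
 * sequences with HCI_INIT set and, on success, mark the device as
 * powered. The whole procedure is serialized by the request lock.
 */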
cbed0ca1 2335static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2336{
1da177e4
LT
2337 int ret = 0;
2338
1da177e4
LT
2339 BT_DBG("%s %p", hdev->name, hdev);
2340
2341 hci_req_lock(hdev);
2342
94324962
JH
2343 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2344 ret = -ENODEV;
2345 goto done;
2346 }
2347
d603b76b
MH
2348 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2349 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2350 /* Check for rfkill but allow the HCI setup stage to
2351 * proceed (which in itself doesn't cause any RF activity).
2352 */
2353 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2354 ret = -ERFKILL;
2355 goto done;
2356 }
2357
2358 /* Check for valid public address or a configured static
2359 * random address, but let the HCI setup proceed to
2360 * be able to determine if there is a public address
2361 * or not.
2362 *
c6beca0e
MH
2363 * In case of user channel usage, it is not important
2364 * if a public address or static random address is
2365 * available.
2366 *
a5c8f270
MH
2367 * This check is only valid for BR/EDR controllers
2368 * since AMP controllers do not have an address.
2369 */
c6beca0e
MH
2370 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2371 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2372 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2373 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2374 ret = -EADDRNOTAVAIL;
2375 goto done;
2376 }
611b30f7
MH
2377 }
2378
1da177e4
LT
2379 if (test_bit(HCI_UP, &hdev->flags)) {
2380 ret = -EALREADY;
2381 goto done;
2382 }
2383
1da177e4
LT
2384 if (hdev->open(hdev)) {
2385 ret = -EIO;
2386 goto done;
2387 }
2388
f41c70c4
MH
2389 atomic_set(&hdev->cmd_cnt, 1);
2390 set_bit(HCI_INIT, &hdev->flags);
2391
af202f84
MH
2392 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2393 if (hdev->setup)
2394 ret = hdev->setup(hdev);
f41c70c4 2395
af202f84
MH
2396 /* The transport driver can set these quirks before
2397 * creating the HCI device or in its setup callback.
2398 *
2399 * In case any of them is set, the controller has to
2400 * start up as unconfigured.
2401 */
eb1904f4
MH
2402 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2403 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2404 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2405
0ebca7d6
MH
2406 /* For an unconfigured controller it is required to
2407 * read at least the version information provided by
2408 * the Read Local Version Information command.
2409 *
2410 * If the set_bdaddr driver callback is provided, then
2411 * also the original Bluetooth public device address
2412 * will be read using the Read BD Address command.
2413 */
2414 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2415 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2416 }
2417
9713c17b
MH
2418 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2419 /* If public address change is configured, ensure that
2420 * the address gets programmed. If the driver does not
2421 * support changing the public address, fail the power
2422 * on procedure.
2423 */
2424 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2425 hdev->set_bdaddr)
24c457e2
MH
2426 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2427 else
2428 ret = -EADDRNOTAVAIL;
2429 }
2430
f41c70c4 2431 if (!ret) {
4a964404 2432 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2433 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2434 ret = __hci_init(hdev);
1da177e4
LT
2435 }
2436
f41c70c4
MH
2437 clear_bit(HCI_INIT, &hdev->flags);
2438
1da177e4
LT
2439 if (!ret) {
2440 hci_dev_hold(hdev);
d6bfd59c 2441 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2442 set_bit(HCI_UP, &hdev->flags);
2443 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2444 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2445 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2446 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2447 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2448 hdev->dev_type == HCI_BREDR) {
09fd0de5 2449 hci_dev_lock(hdev);
744cf19e 2450 mgmt_powered(hdev, 1);
09fd0de5 2451 hci_dev_unlock(hdev);
56e5cb86 2452 }
8e87d142 2453 } else {
1da177e4 2454 /* Init failed, cleanup */
3eff45ea 2455 flush_work(&hdev->tx_work);
c347b765 2456 flush_work(&hdev->cmd_work);
b78752cc 2457 flush_work(&hdev->rx_work);
1da177e4
LT
2458
2459 skb_queue_purge(&hdev->cmd_q);
2460 skb_queue_purge(&hdev->rx_q);
2461
2462 if (hdev->flush)
2463 hdev->flush(hdev);
2464
2465 if (hdev->sent_cmd) {
2466 kfree_skb(hdev->sent_cmd);
2467 hdev->sent_cmd = NULL;
2468 }
2469
2470 hdev->close(hdev);
fee746b0 2471 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2472 }
2473
2474done:
2475 hci_req_unlock(hdev);
1da177e4
LT
2476 return ret;
2477}
2478
cbed0ca1
JH
2479/* ---- HCI ioctl helpers ---- */
2480
2481int hci_dev_open(__u16 dev)
2482{
2483 struct hci_dev *hdev;
2484 int err;
2485
2486 hdev = hci_dev_get(dev);
2487 if (!hdev)
2488 return -ENODEV;
2489
4a964404 2490 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2491 * up as user channel. Trying to bring them up as normal devices
2492 * will result in a failure. Only user channel operation is
2493 * possible.
2494 *
2495 * When this function is called for a user channel, the flag
2496 * HCI_USER_CHANNEL will be set first before attempting to
2497 * open the device.
2498 */
4a964404 2499 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2500 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501 err = -EOPNOTSUPP;
2502 goto done;
2503 }
2504
e1d08f40
JH
2505 /* We need to ensure that no other power on/off work is pending
2506 * before proceeding to call hci_dev_do_open. This is
2507 * particularly important if the setup procedure has not yet
2508 * completed.
2509 */
2510 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2511 cancel_delayed_work(&hdev->power_off);
2512
a5c8f270
MH
2513 /* After this call it is guaranteed that the setup procedure
2514 * has finished. This means that error conditions like RFKILL
2515 * or no valid public or static random address apply.
2516 */
e1d08f40
JH
2517 flush_workqueue(hdev->req_workqueue);
2518
12aa4f0a 2519 /* For controllers not using the management interface and that
b6ae8457 2520 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2521 * so that pairing works for them. Once the management interface
2522 * is in use this bit will be cleared again and userspace has
2523 * to explicitly enable it.
2524 */
2525 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2526 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2527 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2528
cbed0ca1
JH
2529 err = hci_dev_do_open(hdev);
2530
fee746b0 2531done:
cbed0ca1 2532 hci_dev_put(hdev);
cbed0ca1
JH
2533 return err;
2534}
2535
d7347f3c
JH
2536/* This function requires the caller holds hdev->lock */
2537static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2538{
2539 struct hci_conn_params *p;
2540
2541 list_for_each_entry(p, &hdev->le_conn_params, list)
2542 list_del_init(&p->action);
2543
2544 BT_DBG("All LE pending actions cleared");
2545}
2546
1da177e4
LT
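/* Bring the controller down: flush the pending work and queues, drop
 * all connections and cached state, optionally issue a final HCI
 * reset and leave only the HCI_RAW flag intact.
 */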
2547static int hci_dev_do_close(struct hci_dev *hdev)
2548{
2549 BT_DBG("%s %p", hdev->name, hdev);
2550
78c04c0b
VCG
2551 cancel_delayed_work(&hdev->power_off);
2552
1da177e4
LT
2553 hci_req_cancel(hdev, ENODEV);
2554 hci_req_lock(hdev);
2555
2556 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2557 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2558 hci_req_unlock(hdev);
2559 return 0;
2560 }
2561
3eff45ea
GP
2562 /* Flush RX and TX works */
2563 flush_work(&hdev->tx_work);
b78752cc 2564 flush_work(&hdev->rx_work);
1da177e4 2565
16ab91ab 2566 if (hdev->discov_timeout > 0) {
e0f9309f 2567 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2568 hdev->discov_timeout = 0;
5e5282bb 2569 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2570 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2571 }
2572
a8b2d5c2 2573 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2574 cancel_delayed_work(&hdev->service_cache);
2575
7ba8b4be 2576 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2577
2578 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2579 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2580
09fd0de5 2581 hci_dev_lock(hdev);
1f9b9a5d 2582 hci_inquiry_cache_flush(hdev);
1da177e4 2583 hci_conn_hash_flush(hdev);
d7347f3c 2584 hci_pend_le_actions_clear(hdev);
09fd0de5 2585 hci_dev_unlock(hdev);
1da177e4
LT
2586
2587 hci_notify(hdev, HCI_DEV_DOWN);
2588
2589 if (hdev->flush)
2590 hdev->flush(hdev);
2591
2592 /* Reset device */
2593 skb_queue_purge(&hdev->cmd_q);
2594 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2595 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2596 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2597 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2598 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2599 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2600 clear_bit(HCI_INIT, &hdev->flags);
2601 }
2602
c347b765
GP
2603 /* flush cmd work */
2604 flush_work(&hdev->cmd_work);
1da177e4
LT
2605
2606 /* Drop queues */
2607 skb_queue_purge(&hdev->rx_q);
2608 skb_queue_purge(&hdev->cmd_q);
2609 skb_queue_purge(&hdev->raw_q);
2610
2611 /* Drop last sent command */
2612 if (hdev->sent_cmd) {
65cc2b49 2613 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2614 kfree_skb(hdev->sent_cmd);
2615 hdev->sent_cmd = NULL;
2616 }
2617
b6ddb638
JH
2618 kfree_skb(hdev->recv_evt);
2619 hdev->recv_evt = NULL;
2620
1da177e4
LT
2621 /* After this point our queues are empty
2622 * and no tasks are scheduled. */
2623 hdev->close(hdev);
2624
35b973c9 2625 /* Clear flags */
fee746b0 2626 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2627 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2628
93c311a0
MH
2629 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2630 if (hdev->dev_type == HCI_BREDR) {
2631 hci_dev_lock(hdev);
2632 mgmt_powered(hdev, 0);
2633 hci_dev_unlock(hdev);
2634 }
8ee56540 2635 }
5add6af8 2636
ced5c338 2637 /* Controller radio is available but is currently powered down */
536619e8 2638 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2639
e59fda8d 2640 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2641 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2642 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2643
1da177e4
LT
2644 hci_req_unlock(hdev);
2645
2646 hci_dev_put(hdev);
2647 return 0;
2648}
2649
2650int hci_dev_close(__u16 dev)
2651{
2652 struct hci_dev *hdev;
2653 int err;
2654
70f23020
AE
2655 hdev = hci_dev_get(dev);
2656 if (!hdev)
1da177e4 2657 return -ENODEV;
8ee56540 2658
0736cfa8
MH
2659 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2660 err = -EBUSY;
2661 goto done;
2662 }
2663
8ee56540
MH
2664 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2665 cancel_delayed_work(&hdev->power_off);
2666
1da177e4 2667 err = hci_dev_do_close(hdev);
8ee56540 2668
0736cfa8 2669done:
1da177e4
LT
2670 hci_dev_put(hdev);
2671 return err;
2672}
2673
2674int hci_dev_reset(__u16 dev)
2675{
2676 struct hci_dev *hdev;
2677 int ret = 0;
2678
70f23020
AE
2679 hdev = hci_dev_get(dev);
2680 if (!hdev)
1da177e4
LT
2681 return -ENODEV;
2682
2683 hci_req_lock(hdev);
1da177e4 2684
808a049e
MH
2685 if (!test_bit(HCI_UP, &hdev->flags)) {
2686 ret = -ENETDOWN;
1da177e4 2687 goto done;
808a049e 2688 }
1da177e4 2689
0736cfa8
MH
2690 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2691 ret = -EBUSY;
2692 goto done;
2693 }
2694
4a964404 2695 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2696 ret = -EOPNOTSUPP;
2697 goto done;
2698 }
2699
1da177e4
LT
2700 /* Drop queues */
2701 skb_queue_purge(&hdev->rx_q);
2702 skb_queue_purge(&hdev->cmd_q);
2703
09fd0de5 2704 hci_dev_lock(hdev);
1f9b9a5d 2705 hci_inquiry_cache_flush(hdev);
1da177e4 2706 hci_conn_hash_flush(hdev);
09fd0de5 2707 hci_dev_unlock(hdev);
1da177e4
LT
2708
2709 if (hdev->flush)
2710 hdev->flush(hdev);
2711
8e87d142 2712 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2713 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2714
fee746b0 2715 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2716
2717done:
1da177e4
LT
2718 hci_req_unlock(hdev);
2719 hci_dev_put(hdev);
2720 return ret;
2721}
2722
2723int hci_dev_reset_stat(__u16 dev)
2724{
2725 struct hci_dev *hdev;
2726 int ret = 0;
2727
70f23020
AE
2728 hdev = hci_dev_get(dev);
2729 if (!hdev)
1da177e4
LT
2730 return -ENODEV;
2731
0736cfa8
MH
2732 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2733 ret = -EBUSY;
2734 goto done;
2735 }
2736
4a964404 2737 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2738 ret = -EOPNOTSUPP;
2739 goto done;
2740 }
2741
1da177e4
LT
2742 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2743
0736cfa8 2744done:
1da177e4 2745 hci_dev_put(hdev);
1da177e4
LT
2746 return ret;
2747}
2748
123abc08
JH
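/* Mirror a raw HCISETSCAN change into the HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags and, when the management interface is in
 * use, announce the result with a New Settings event.
 */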
2749static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2750{
bc6d2d04 2751 bool conn_changed, discov_changed;
123abc08
JH
2752
2753 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2754
2755 if ((scan & SCAN_PAGE))
2756 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2757 &hdev->dev_flags);
2758 else
2759 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2760 &hdev->dev_flags);
2761
bc6d2d04
JH
2762 if ((scan & SCAN_INQUIRY)) {
2763 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2764 &hdev->dev_flags);
2765 } else {
2766 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2767 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2768 &hdev->dev_flags);
2769 }
2770
123abc08
JH
2771 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2772 return;
2773
bc6d2d04
JH
2774 if (conn_changed || discov_changed) {
2775 /* In case this was disabled through mgmt */
2776 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2777
2778 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2779 mgmt_update_adv_data(hdev);
2780
123abc08 2781 mgmt_new_settings(hdev);
bc6d2d04 2782 }
123abc08
JH
2783}
2784
1da177e4
LT
2785int hci_dev_cmd(unsigned int cmd, void __user *arg)
2786{
2787 struct hci_dev *hdev;
2788 struct hci_dev_req dr;
2789 int err = 0;
2790
2791 if (copy_from_user(&dr, arg, sizeof(dr)))
2792 return -EFAULT;
2793
70f23020
AE
2794 hdev = hci_dev_get(dr.dev_id);
2795 if (!hdev)
1da177e4
LT
2796 return -ENODEV;
2797
0736cfa8
MH
2798 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2799 err = -EBUSY;
2800 goto done;
2801 }
2802
4a964404 2803 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2804 err = -EOPNOTSUPP;
2805 goto done;
2806 }
2807
5b69bef5
MH
2808 if (hdev->dev_type != HCI_BREDR) {
2809 err = -EOPNOTSUPP;
2810 goto done;
2811 }
2812
56f87901
JH
2813 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2814 err = -EOPNOTSUPP;
2815 goto done;
2816 }
2817
1da177e4
LT
2818 switch (cmd) {
2819 case HCISETAUTH:
01178cd4
JH
2820 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2821 HCI_INIT_TIMEOUT);
1da177e4
LT
2822 break;
2823
2824 case HCISETENCRYPT:
2825 if (!lmp_encrypt_capable(hdev)) {
2826 err = -EOPNOTSUPP;
2827 break;
2828 }
2829
2830 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2831 /* Auth must be enabled first */
01178cd4
JH
2832 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2833 HCI_INIT_TIMEOUT);
1da177e4
LT
2834 if (err)
2835 break;
2836 }
2837
01178cd4
JH
2838 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2839 HCI_INIT_TIMEOUT);
1da177e4
LT
2840 break;
2841
2842 case HCISETSCAN:
01178cd4
JH
2843 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2844 HCI_INIT_TIMEOUT);
91a668b0 2845
bc6d2d04
JH
2846 /* Ensure that the connectable and discoverable states
2847 * get correctly modified as this was a non-mgmt change.
91a668b0 2848 */
123abc08
JH
2849 if (!err)
2850 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2851 break;
2852
1da177e4 2853 case HCISETLINKPOL:
01178cd4
JH
2854 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2855 HCI_INIT_TIMEOUT);
1da177e4
LT
2856 break;
2857
2858 case HCISETLINKMODE:
e4e8e37c
MH
2859 hdev->link_mode = ((__u16) dr.dev_opt) &
2860 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2861 break;
2862
2863 case HCISETPTYPE:
2864 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2865 break;
2866
2867 case HCISETACLMTU:
e4e8e37c
MH
2868 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2869 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2870 break;
2871
2872 case HCISETSCOMTU:
e4e8e37c
MH
2873 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2874 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2875 break;
2876
2877 default:
2878 err = -EINVAL;
2879 break;
2880 }
e4e8e37c 2881
0736cfa8 2882done:
1da177e4
LT
2883 hci_dev_put(hdev);
2884 return err;
2885}
2886
2887int hci_get_dev_list(void __user *arg)
2888{
8035ded4 2889 struct hci_dev *hdev;
1da177e4
LT
2890 struct hci_dev_list_req *dl;
2891 struct hci_dev_req *dr;
1da177e4
LT
2892 int n = 0, size, err;
2893 __u16 dev_num;
2894
2895 if (get_user(dev_num, (__u16 __user *) arg))
2896 return -EFAULT;
2897
2898 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2899 return -EINVAL;
2900
2901 size = sizeof(*dl) + dev_num * sizeof(*dr);
2902
70f23020
AE
2903 dl = kzalloc(size, GFP_KERNEL);
2904 if (!dl)
1da177e4
LT
2905 return -ENOMEM;
2906
2907 dr = dl->dev_req;
2908
f20d09d5 2909 read_lock(&hci_dev_list_lock);
8035ded4 2910 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2911 unsigned long flags = hdev->flags;
c542a06c 2912
2e84d8db
MH
2913 /* When the auto-off is configured it means the transport
2914 * is running, but in that case still indicate that the
2915 * device is actually down.
2916 */
2917 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2918 flags &= ~BIT(HCI_UP);
c542a06c 2919
1da177e4 2920 (dr + n)->dev_id = hdev->id;
2e84d8db 2921 (dr + n)->dev_opt = flags;
c542a06c 2922
1da177e4
LT
2923 if (++n >= dev_num)
2924 break;
2925 }
f20d09d5 2926 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2927
2928 dl->dev_num = n;
2929 size = sizeof(*dl) + n * sizeof(*dr);
2930
2931 err = copy_to_user(arg, dl, size);
2932 kfree(dl);
2933
2934 return err ? -EFAULT : 0;
2935}
2936
2937int hci_get_dev_info(void __user *arg)
2938{
2939 struct hci_dev *hdev;
2940 struct hci_dev_info di;
2e84d8db 2941 unsigned long flags;
1da177e4
LT
2942 int err = 0;
2943
2944 if (copy_from_user(&di, arg, sizeof(di)))
2945 return -EFAULT;
2946
70f23020
AE
2947 hdev = hci_dev_get(di.dev_id);
2948 if (!hdev)
1da177e4
LT
2949 return -ENODEV;
2950
2e84d8db
MH
2951 /* When the auto-off is configured it means the transport
2952 * is running, but in that case still indicate that the
2953 * device is actually down.
2954 */
2955 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2956 flags = hdev->flags & ~BIT(HCI_UP);
2957 else
2958 flags = hdev->flags;
c542a06c 2959
1da177e4
LT
2960 strcpy(di.name, hdev->name);
2961 di.bdaddr = hdev->bdaddr;
60f2a3ed 2962 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2963 di.flags = flags;
1da177e4 2964 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2965 if (lmp_bredr_capable(hdev)) {
2966 di.acl_mtu = hdev->acl_mtu;
2967 di.acl_pkts = hdev->acl_pkts;
2968 di.sco_mtu = hdev->sco_mtu;
2969 di.sco_pkts = hdev->sco_pkts;
2970 } else {
2971 di.acl_mtu = hdev->le_mtu;
2972 di.acl_pkts = hdev->le_pkts;
2973 di.sco_mtu = 0;
2974 di.sco_pkts = 0;
2975 }
1da177e4
LT
2976 di.link_policy = hdev->link_policy;
2977 di.link_mode = hdev->link_mode;
2978
2979 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2980 memcpy(&di.features, &hdev->features, sizeof(di.features));
2981
2982 if (copy_to_user(arg, &di, sizeof(di)))
2983 err = -EFAULT;
2984
2985 hci_dev_put(hdev);
2986
2987 return err;
2988}
2989
2990/* ---- Interface to HCI drivers ---- */
2991
611b30f7
MH
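/* rfkill callback: blocking powers the controller down unless setup
 * or configuration is still in progress, unblocking only clears the
 * flag and leaves powering up to the usual paths.
 */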
2992static int hci_rfkill_set_block(void *data, bool blocked)
2993{
2994 struct hci_dev *hdev = data;
2995
2996 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2997
0736cfa8
MH
2998 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2999 return -EBUSY;
3000
5e130367
JH
3001 if (blocked) {
3002 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
3003 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3004 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 3005 hci_dev_do_close(hdev);
5e130367
JH
3006 } else {
3007 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 3008 }
611b30f7
MH
3009
3010 return 0;
3011}
3012
3013static const struct rfkill_ops hci_rfkill_ops = {
3014 .set_block = hci_rfkill_set_block,
3015};
3016
ab81cbf9
JH
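/* Power-on worker: open the device and re-check the error conditions
 * (rfkill, missing addresses) that were deliberately ignored during
 * setup, powering back off if any of them still holds.
 */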
3017static void hci_power_on(struct work_struct *work)
3018{
3019 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3020 int err;
ab81cbf9
JH
3021
3022 BT_DBG("%s", hdev->name);
3023
cbed0ca1 3024 err = hci_dev_do_open(hdev);
96570ffc
JH
3025 if (err < 0) {
3026 mgmt_set_powered_failed(hdev, err);
ab81cbf9 3027 return;
96570ffc 3028 }
ab81cbf9 3029
a5c8f270
MH
3030 /* During the HCI setup phase, a few error conditions are
3031 * ignored and they need to be checked now. If they are still
3032 * valid, it is important to turn the device back off.
3033 */
3034 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3035 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3036 (hdev->dev_type == HCI_BREDR &&
3037 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3038 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3039 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3040 hci_dev_do_close(hdev);
3041 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3042 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3043 HCI_AUTO_OFF_TIMEOUT);
bf543036 3044 }
ab81cbf9 3045
fee746b0 3046 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3047 /* For unconfigured devices, set the HCI_RAW flag
3048 * so that userspace can easily identify them.
3049 */
3050 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3051 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3052
3053 /* For fully configured devices, this will send
3054 * the Index Added event. For unconfigured devices,
3055 * it will send the Unconfigured Index Added event.
3056 *
3057 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3058 * and no event will be sent.
3059 */
3060 mgmt_index_added(hdev);
d603b76b 3061 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3062 /* Once the controller is configured, it is
3063 * important to clear the HCI_RAW flag.
3064 */
3065 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3066 clear_bit(HCI_RAW, &hdev->flags);
3067
d603b76b
MH
3068 /* Powering on the controller with HCI_CONFIG set only
3069 * happens with the transition from unconfigured to
3070 * configured. This will send the Index Added event.
3071 */
744cf19e 3072 mgmt_index_added(hdev);
fee746b0 3073 }
ab81cbf9
JH
3074}
3075
3076static void hci_power_off(struct work_struct *work)
3077{
3243553f 3078 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3079 power_off.work);
ab81cbf9
JH
3080
3081 BT_DBG("%s", hdev->name);
3082
8ee56540 3083 hci_dev_do_close(hdev);
ab81cbf9
JH
3084}
3085
16ab91ab
JH
3086static void hci_discov_off(struct work_struct *work)
3087{
3088 struct hci_dev *hdev;
16ab91ab
JH
3089
3090 hdev = container_of(work, struct hci_dev, discov_off.work);
3091
3092 BT_DBG("%s", hdev->name);
3093
d1967ff8 3094 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3095}
3096
35f7498a 3097void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3098{
4821002c 3099 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3100
4821002c
JH
3101 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3102 list_del(&uuid->list);
2aeb9a1a
JH
3103 kfree(uuid);
3104 }
2aeb9a1a
JH
3105}
3106
35f7498a 3107void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3108{
3109 struct list_head *p, *n;
3110
3111 list_for_each_safe(p, n, &hdev->link_keys) {
3112 struct link_key *key;
3113
3114 key = list_entry(p, struct link_key, list);
3115
3116 list_del(p);
3117 kfree(key);
3118 }
55ed8ca1
JH
3119}
3120
35f7498a 3121void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
3122{
3123 struct smp_ltk *k, *tmp;
3124
3125 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3126 list_del(&k->list);
3127 kfree(k);
3128 }
b899efaf
VCG
3129}
3130
970c4e46
JH
3131void hci_smp_irks_clear(struct hci_dev *hdev)
3132{
3133 struct smp_irk *k, *tmp;
3134
3135 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3136 list_del(&k->list);
3137 kfree(k);
3138 }
3139}
3140
55ed8ca1
JH
3141struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3142{
8035ded4 3143 struct link_key *k;
55ed8ca1 3144
8035ded4 3145 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3146 if (bacmp(bdaddr, &k->bdaddr) == 0)
3147 return k;
55ed8ca1
JH
3148
3149 return NULL;
3150}
3151
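/* Decide whether a link key must be stored persistently: legacy keys
 * always are, debug keys never, and for the remaining types it
 * depends on the bonding requirements both sides declared.
 */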
745c0ce3 3152static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3153 u8 key_type, u8 old_key_type)
d25e28ab
JH
3154{
3155 /* Legacy key */
3156 if (key_type < 0x03)
745c0ce3 3157 return true;
d25e28ab
JH
3158
3159 /* Debug keys are insecure so don't store them persistently */
3160 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3161 return false;
d25e28ab
JH
3162
3163 /* Changed combination key and there's no previous one */
3164 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3165 return false;
d25e28ab
JH
3166
3167 /* Security mode 3 case */
3168 if (!conn)
745c0ce3 3169 return true;
d25e28ab
JH
3170
3171 /* Neither local nor remote side had no-bonding as a requirement */
3172 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3173 return true;
d25e28ab
JH
3174
3175 /* Local side had dedicated bonding as requirement */
3176 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3177 return true;
d25e28ab
JH
3178
3179 /* Remote side had dedicated bonding as requirement */
3180 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3181 return true;
d25e28ab
JH
3182
3183 /* If none of the above criteria match, then don't store the key
3184 * persistently */
745c0ce3 3185 return false;
d25e28ab
JH
3186}
3187
e804d25d 3188static u8 ltk_role(u8 type)
98a0b845 3189{
e804d25d
JH
3190 if (type == SMP_LTK)
3191 return HCI_ROLE_MASTER;
98a0b845 3192
e804d25d 3193 return HCI_ROLE_SLAVE;
98a0b845
JH
3194}
3195
fe39c7b2 3196struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
e804d25d 3197 u8 role)
75d262c2 3198{
c9839a11 3199 struct smp_ltk *k;
75d262c2 3200
c9839a11 3201 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 3202 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3203 continue;
3204
e804d25d 3205 if (ltk_role(k->type) != role)
98a0b845
JH
3206 continue;
3207
c9839a11 3208 return k;
75d262c2
VCG
3209 }
3210
3211 return NULL;
3212}
75d262c2 3213
c9839a11 3214struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3215 u8 addr_type, u8 role)
75d262c2 3216{
c9839a11 3217 struct smp_ltk *k;
75d262c2 3218
c9839a11
VCG
3219 list_for_each_entry(k, &hdev->long_term_keys, list)
3220 if (addr_type == k->bdaddr_type &&
98a0b845 3221 bacmp(bdaddr, &k->bdaddr) == 0 &&
e804d25d 3222 ltk_role(k->type) == role)
75d262c2
VCG
3223 return k;
3224
3225 return NULL;
3226}
75d262c2 3227
970c4e46
JH
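/* Resolve an RPA to its IRK: first try the cached rpa of every key,
 * then fall back to recomputing the hash with the AES transform,
 * caching the rpa again on a successful match.
 */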
3228struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3229{
3230 struct smp_irk *irk;
3231
3232 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3233 if (!bacmp(&irk->rpa, rpa))
3234 return irk;
3235 }
3236
893edede
JH
3237 if (!hdev->tfm_aes)
3238 return NULL;
3239
970c4e46
JH
3240 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3241 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3242 bacpy(&irk->rpa, rpa);
3243 return irk;
3244 }
3245 }
3246
3247 return NULL;
3248}
3249
3250struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3251 u8 addr_type)
3252{
3253 struct smp_irk *irk;
3254
6cfc9988
JH
3255 /* Identity Address must be public or static random */
3256 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3257 return NULL;
3258
970c4e46
JH
3259 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3260 if (addr_type == irk->addr_type &&
3261 bacmp(bdaddr, &irk->bdaddr) == 0)
3262 return irk;
3263 }
3264
3265 return NULL;
3266}
3267
567fa2aa 3268struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3269 bdaddr_t *bdaddr, u8 *val, u8 type,
3270 u8 pin_len, bool *persistent)
55ed8ca1
JH
3271{
3272 struct link_key *key, *old_key;
745c0ce3 3273 u8 old_key_type;
55ed8ca1
JH
3274
3275 old_key = hci_find_link_key(hdev, bdaddr);
3276 if (old_key) {
3277 old_key_type = old_key->type;
3278 key = old_key;
3279 } else {
12adcf3a 3280 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3281 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3282 if (!key)
567fa2aa 3283 return NULL;
55ed8ca1
JH
3284 list_add(&key->list, &hdev->link_keys);
3285 }
3286
6ed93dc6 3287 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3288
d25e28ab
JH
3289 /* Some buggy controller combinations generate a changed
3290 * combination key for legacy pairing even when there's no
3291 * previous key */
3292 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3293 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3294 type = HCI_LK_COMBINATION;
655fe6ec
JH
3295 if (conn)
3296 conn->key_type = type;
3297 }
d25e28ab 3298
55ed8ca1 3299 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3300 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3301 key->pin_len = pin_len;
3302
b6020ba0 3303 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3304 key->type = old_key_type;
4748fed2
JH
3305 else
3306 key->type = type;
3307
7652ff6a
JH
3308 if (persistent)
3309 *persistent = hci_persistent_key(hdev, conn, type,
3310 old_key_type);
4df378a1 3311
567fa2aa 3312 return key;
55ed8ca1
JH
3313}
3314
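/* Store a new SMP Long Term Key or update an existing one; entries
 * are keyed on identity address, address type and key role.
 */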
ca9142b8 3315struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3316 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3317 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3318{
c9839a11 3319 struct smp_ltk *key, *old_key;
e804d25d 3320 u8 role = ltk_role(type);
75d262c2 3321
e804d25d 3322 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3323 if (old_key)
75d262c2 3324 key = old_key;
c9839a11 3325 else {
0a14ab41 3326 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3327 if (!key)
ca9142b8 3328 return NULL;
c9839a11 3329 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3330 }
3331
75d262c2 3332 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3333 key->bdaddr_type = addr_type;
3334 memcpy(key->val, tk, sizeof(key->val));
3335 key->authenticated = authenticated;
3336 key->ediv = ediv;
fe39c7b2 3337 key->rand = rand;
c9839a11
VCG
3338 key->enc_size = enc_size;
3339 key->type = type;
75d262c2 3340
ca9142b8 3341 return key;
75d262c2
VCG
3342}
3343
ca9142b8
JH
3344struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3345 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3346{
3347 struct smp_irk *irk;
3348
3349 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3350 if (!irk) {
3351 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3352 if (!irk)
ca9142b8 3353 return NULL;
970c4e46
JH
3354
3355 bacpy(&irk->bdaddr, bdaddr);
3356 irk->addr_type = addr_type;
3357
3358 list_add(&irk->list, &hdev->identity_resolving_keys);
3359 }
3360
3361 memcpy(irk->val, val, 16);
3362 bacpy(&irk->rpa, rpa);
3363
ca9142b8 3364 return irk;
970c4e46
JH
3365}
3366
55ed8ca1
JH
3367int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3368{
3369 struct link_key *key;
3370
3371 key = hci_find_link_key(hdev, bdaddr);
3372 if (!key)
3373 return -ENOENT;
3374
6ed93dc6 3375 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3376
3377 list_del(&key->list);
3378 kfree(key);
3379
3380 return 0;
3381}
3382
e0b2b27e 3383int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3384{
3385 struct smp_ltk *k, *tmp;
c51ffa0b 3386 int removed = 0;
b899efaf
VCG
3387
3388 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3389 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3390 continue;
3391
6ed93dc6 3392 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3393
3394 list_del(&k->list);
3395 kfree(k);
c51ffa0b 3396 removed++;
b899efaf
VCG
3397 }
3398
c51ffa0b 3399 return removed ? 0 : -ENOENT;
b899efaf
VCG
3400}
3401
a7ec7338
JH
3402void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3403{
3404 struct smp_irk *k, *tmp;
3405
668b7b19 3406 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3407 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3408 continue;
3409
3410 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3411
3412 list_del(&k->list);
3413 kfree(k);
3414 }
3415}
3416
6bd32326 3417/* HCI command timer function */
65cc2b49 3418static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3419{
65cc2b49
MH
3420 struct hci_dev *hdev = container_of(work, struct hci_dev,
3421 cmd_timer.work);
6bd32326 3422
bda4f23a
AE
3423 if (hdev->sent_cmd) {
3424 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3425 u16 opcode = __le16_to_cpu(sent->opcode);
3426
3427 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3428 } else {
3429 BT_ERR("%s command tx timeout", hdev->name);
3430 }
3431
6bd32326 3432 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3433 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3434}
3435
2763eda6 3436struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3437 bdaddr_t *bdaddr)
2763eda6
SJ
3438{
3439 struct oob_data *data;
3440
3441 list_for_each_entry(data, &hdev->remote_oob_data, list)
3442 if (bacmp(bdaddr, &data->bdaddr) == 0)
3443 return data;
3444
3445 return NULL;
3446}
3447
3448int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3449{
3450 struct oob_data *data;
3451
3452 data = hci_find_remote_oob_data(hdev, bdaddr);
3453 if (!data)
3454 return -ENOENT;
3455
6ed93dc6 3456 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3457
3458 list_del(&data->list);
3459 kfree(data);
3460
3461 return 0;
3462}
3463
35f7498a 3464void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3465{
3466 struct oob_data *data, *n;
3467
3468 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3469 list_del(&data->list);
3470 kfree(data);
3471 }
2763eda6
SJ
3472}
3473
0798872e
MH
3474int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3475 u8 *hash, u8 *randomizer)
2763eda6
SJ
3476{
3477 struct oob_data *data;
3478
3479 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3480 if (!data) {
0a14ab41 3481 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3482 if (!data)
3483 return -ENOMEM;
3484
3485 bacpy(&data->bdaddr, bdaddr);
3486 list_add(&data->list, &hdev->remote_oob_data);
3487 }
3488
519ca9d0
MH
3489 memcpy(data->hash192, hash, sizeof(data->hash192));
3490 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3491
0798872e
MH
3492 memset(data->hash256, 0, sizeof(data->hash256));
3493 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3494
3495 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3496
3497 return 0;
3498}
3499
3500int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501 u8 *hash192, u8 *randomizer192,
3502 u8 *hash256, u8 *randomizer256)
3503{
3504 struct oob_data *data;
3505
3506 data = hci_find_remote_oob_data(hdev, bdaddr);
3507 if (!data) {
0a14ab41 3508 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3509 if (!data)
3510 return -ENOMEM;
3511
3512 bacpy(&data->bdaddr, bdaddr);
3513 list_add(&data->list, &hdev->remote_oob_data);
3514 }
3515
3516 memcpy(data->hash192, hash192, sizeof(data->hash192));
3517 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3518
3519 memcpy(data->hash256, hash256, sizeof(data->hash256));
3520 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3521
6ed93dc6 3522 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3523
3524 return 0;
3525}
3526
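/* Generic helpers for bdaddr_t based lists such as the blacklist and
 * whitelist above; entries are matched on both the address and the
 * address type.
 */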
dcc36c16 3527struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3528 bdaddr_t *bdaddr, u8 type)
b2a66aad 3529{
8035ded4 3530 struct bdaddr_list *b;
b2a66aad 3531
dcc36c16 3532 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3533 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3534 return b;
b9ee0a78 3535 }
b2a66aad
AJ
3536
3537 return NULL;
3538}
3539
dcc36c16 3540void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3541{
3542 struct list_head *p, *n;
3543
dcc36c16 3544 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3545 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3546
3547 list_del(p);
3548 kfree(b);
3549 }
b2a66aad
AJ
3550}
3551
dcc36c16 3552int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3553{
3554 struct bdaddr_list *entry;
b2a66aad 3555
b9ee0a78 3556 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3557 return -EBADF;
3558
dcc36c16 3559 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3560 return -EEXIST;
b2a66aad 3561
27f70f3e 3562 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3563 if (!entry)
3564 return -ENOMEM;
b2a66aad
AJ
3565
3566 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3567 entry->bdaddr_type = type;
b2a66aad 3568
dcc36c16 3569 list_add(&entry->list, list);
b2a66aad 3570
2a8357f2 3571 return 0;
b2a66aad
AJ
3572}
3573
dcc36c16 3574int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3575{
3576 struct bdaddr_list *entry;
b2a66aad 3577
35f7498a 3578 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3579 hci_bdaddr_list_clear(list);
35f7498a
JH
3580 return 0;
3581 }
b2a66aad 3582
dcc36c16 3583 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3584 if (!entry)
3585 return -ENOENT;
3586
3587 list_del(&entry->list);
3588 kfree(entry);
3589
3590 return 0;
3591}
3592
15819a70
AG
3593/* This function requires the caller holds hdev->lock */
3594struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3595 bdaddr_t *addr, u8 addr_type)
3596{
3597 struct hci_conn_params *params;
3598
738f6185
JH
3599 /* The conn params list only contains identity addresses */
3600 if (!hci_is_identity_address(addr, addr_type))
3601 return NULL;
3602
15819a70
AG
3603 list_for_each_entry(params, &hdev->le_conn_params, list) {
3604 if (bacmp(&params->addr, addr) == 0 &&
3605 params->addr_type == addr_type) {
3606 return params;
3607 }
3608 }
3609
3610 return NULL;
3611}
3612
cef952ce
AG
3613static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3614{
3615 struct hci_conn *conn;
3616
3617 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3618 if (!conn)
3619 return false;
3620
3621 if (conn->dst_type != type)
3622 return false;
3623
3624 if (conn->state != BT_CONNECTED)
3625 return false;
3626
3627 return true;
3628}
3629
4b10966f 3630/* This function requires the caller holds hdev->lock */
501f8827
JH
3631struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3632 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3633{
912b42ef 3634 struct hci_conn_params *param;
a9b0a04c 3635
738f6185
JH
3636 /* The list only contains identity addresses */
3637 if (!hci_is_identity_address(addr, addr_type))
3638 return NULL;
a9b0a04c 3639
501f8827 3640 list_for_each_entry(param, list, action) {
912b42ef
JH
3641 if (bacmp(&param->addr, addr) == 0 &&
3642 param->addr_type == addr_type)
3643 return param;
4b10966f
MH
3644 }
3645
3646 return NULL;
a9b0a04c
AG
3647}
3648
15819a70 3649/* This function requires the caller holds hdev->lock */
51d167c0
MH
3650struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3651 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3652{
3653 struct hci_conn_params *params;
3654
c46245b3 3655 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3656 return NULL;
a9b0a04c 3657
15819a70 3658 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3659 if (params)
51d167c0 3660 return params;
15819a70
AG
3661
3662 params = kzalloc(sizeof(*params), GFP_KERNEL);
3663 if (!params) {
3664 BT_ERR("Out of memory");
51d167c0 3665 return NULL;
15819a70
AG
3666 }
3667
3668 bacpy(&params->addr, addr);
3669 params->addr_type = addr_type;
cef952ce
AG
3670
3671 list_add(&params->list, &hdev->le_conn_params);
93450c75 3672 INIT_LIST_HEAD(&params->action);
cef952ce 3673
bf5b3c8b
MH
3674 params->conn_min_interval = hdev->le_conn_min_interval;
3675 params->conn_max_interval = hdev->le_conn_max_interval;
3676 params->conn_latency = hdev->le_conn_latency;
3677 params->supervision_timeout = hdev->le_supv_timeout;
3678 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3679
3680 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3681
51d167c0 3682 return params;
bf5b3c8b
MH
3683}
3684
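/* Update the auto-connect policy of a connection parameter entry and
 * move it onto the matching pending action list (pend_le_reports for
 * REPORT, pend_le_conns for DIRECT and ALWAYS), re-evaluating the
 * background scan along the way.
 */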
3685/* This function requires the caller holds hdev->lock */
3686int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3687 u8 auto_connect)
15819a70
AG
3688{
3689 struct hci_conn_params *params;
3690
8c87aae1
MH
3691 params = hci_conn_params_add(hdev, addr, addr_type);
3692 if (!params)
3693 return -EIO;
cef952ce 3694
42ce26de
JH
3695 if (params->auto_connect == auto_connect)
3696 return 0;
3697
95305baa 3698 list_del_init(&params->action);
15819a70 3699
cef952ce
AG
3700 switch (auto_connect) {
3701 case HCI_AUTO_CONN_DISABLED:
3702 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3703 hci_update_background_scan(hdev);
cef952ce 3704 break;
851efca8 3705 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3706 list_add(&params->action, &hdev->pend_le_reports);
3707 hci_update_background_scan(hdev);
cef952ce 3708 break;
4b9e7e75 3709 case HCI_AUTO_CONN_DIRECT:
cef952ce 3710 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3711 if (!is_connected(hdev, addr, addr_type)) {
3712 list_add(&params->action, &hdev->pend_le_conns);
3713 hci_update_background_scan(hdev);
3714 }
cef952ce
AG
3715 break;
3716 }
15819a70 3717
851efca8
JH
3718 params->auto_connect = auto_connect;
3719
d06b50ce
MH
3720 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3721 auto_connect);
a9b0a04c
AG
3722
3723 return 0;
15819a70
AG
3724}
3725
3726/* This function requires the caller holds hdev->lock */
3727void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3728{
3729 struct hci_conn_params *params;
3730
3731 params = hci_conn_params_lookup(hdev, addr, addr_type);
3732 if (!params)
3733 return;
3734
95305baa 3735 list_del(&params->action);
15819a70
AG
3736 list_del(&params->list);
3737 kfree(params);
3738
95305baa
JH
3739 hci_update_background_scan(hdev);
3740
15819a70
AG
3741 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3742}
3743
3744/* This function requires the caller holds hdev->lock */
55af49a8 3745void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3746{
3747 struct hci_conn_params *params, *tmp;
3748
3749 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3750 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3751 continue;
15819a70
AG
3752 list_del(&params->list);
3753 kfree(params);
3754 }
3755
55af49a8 3756 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3757}
3758
3759/* This function requires the caller holds hdev->lock */
373110c5 3760void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3761{
15819a70 3762 struct hci_conn_params *params, *tmp;
77a77a30 3763
15819a70 3764 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3765 list_del(&params->action);
15819a70
AG
3766 list_del(&params->list);
3767 kfree(params);
77a77a30
AG
3768 }
3769
a4790dbd 3770 hci_update_background_scan(hdev);
77a77a30 3771
15819a70 3772 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3773}
3774
4c87eaab 3775static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3776{
4c87eaab
AG
3777 if (status) {
3778 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3779
4c87eaab
AG
3780 hci_dev_lock(hdev);
3781 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3782 hci_dev_unlock(hdev);
3783 return;
3784 }
7ba8b4be
AG
3785}
3786
4c87eaab 3787static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3788{
4c87eaab
AG
3789 /* General inquiry access code (GIAC) */
3790 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3791 struct hci_request req;
3792 struct hci_cp_inquiry cp;
7ba8b4be
AG
3793 int err;
3794
4c87eaab
AG
3795 if (status) {
3796 BT_ERR("Failed to disable LE scanning: status %d", status);
3797 return;
3798 }
7ba8b4be 3799
4c87eaab
AG
3800 switch (hdev->discovery.type) {
3801 case DISCOV_TYPE_LE:
3802 hci_dev_lock(hdev);
3803 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3804 hci_dev_unlock(hdev);
3805 break;
7ba8b4be 3806
4c87eaab
AG
3807 case DISCOV_TYPE_INTERLEAVED:
3808 hci_req_init(&req, hdev);
7ba8b4be 3809
4c87eaab
AG
3810 memset(&cp, 0, sizeof(cp));
3811 memcpy(&cp.lap, lap, sizeof(cp.lap));
3812 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3813 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3814
4c87eaab 3815 hci_dev_lock(hdev);
7dbfac1d 3816
4c87eaab 3817 hci_inquiry_cache_flush(hdev);
7dbfac1d 3818
4c87eaab
AG
3819 err = hci_req_run(&req, inquiry_complete);
3820 if (err) {
3821 BT_ERR("Inquiry request failed: err %d", err);
3822 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3823 }
7dbfac1d 3824
4c87eaab
AG
3825 hci_dev_unlock(hdev);
3826 break;
7dbfac1d 3827 }
7dbfac1d
AG
3828}
3829
7ba8b4be
AG
3830static void le_scan_disable_work(struct work_struct *work)
3831{
3832 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3833 le_scan_disable.work);
4c87eaab
AG
3834 struct hci_request req;
3835 int err;
7ba8b4be
AG
3836
3837 BT_DBG("%s", hdev->name);
3838
4c87eaab 3839 hci_req_init(&req, hdev);
28b75a89 3840
b1efcc28 3841 hci_req_add_le_scan_disable(&req);
28b75a89 3842
4c87eaab
AG
3843 err = hci_req_run(&req, le_scan_disable_work_complete);
3844 if (err)
3845 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3846}
3847
8d97250e
JH
3848static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3849{
3850 struct hci_dev *hdev = req->hdev;
3851
3852 /* If we're advertising or initiating an LE connection we can't
3853 * go ahead and change the random address at this time. This is
3854 * because the eventual initiator address used for the
3855 * subsequently created connection will be undefined (some
3856 * controllers use the new address and others the one we had
3857 * when the operation started).
3858 *
3859 * In this kind of scenario skip the update and let the random
3860 * address be updated at the next cycle.
3861 */
5ce194c4 3862 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3863 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3864 BT_DBG("Deferring random address update");
3865 return;
3866 }
3867
3868 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3869}
3870
94b1fc92
MH
3871int hci_update_random_address(struct hci_request *req, bool require_privacy,
3872 u8 *own_addr_type)
ebd3a747
JH
3873{
3874 struct hci_dev *hdev = req->hdev;
3875 int err;
3876
3877 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3878 * current RPA has expired or there is something other than
3879 * the current RPA in use, then generate a new one.
ebd3a747
JH
3880 */
3881 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3882 int to;
3883
3884 *own_addr_type = ADDR_LE_DEV_RANDOM;
3885
3886 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3887 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3888 return 0;
3889
fabed38f
JH
3890 if (!hdev->tfm_aes) {
3891 BT_ERR("%s crypto not available to generate RPA",
3892 hdev->name);
3893 return -EOPNOTSUPP;
3894 }
3895
2b5224dc 3896 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3897 if (err < 0) {
3898 BT_ERR("%s failed to generate new RPA", hdev->name);
3899 return err;
3900 }
3901
8d97250e 3902 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3903
3904 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3905 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3906
3907 return 0;
94b1fc92
MH
3908 }
3909
3910 /* In case of required privacy without resolvable private address,
3911 * use an unresolvable private address. This is useful for active
3912 * scanning and non-connectable advertising.
3913 */
3914 if (require_privacy) {
3915 bdaddr_t urpa;
3916
3917 get_random_bytes(&urpa, 6);
3918 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3919
3920 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3921 set_random_addr(req, &urpa);
94b1fc92 3922 return 0;
ebd3a747
JH
3923 }
3924
3925 /* If forcing static address is in use or there is no public
3926 * address, use the static address as random address (but skip
3927 * the HCI command if the current random address is already the
3928 * static one).
3929 */
111902f7 3930 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3931 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3932 *own_addr_type = ADDR_LE_DEV_RANDOM;
3933 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3934 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3935 &hdev->static_addr);
3936 return 0;
3937 }
3938
3939 /* Neither privacy nor static address is being used so use a
3940 * public address.
3941 */
3942 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3943
3944 return 0;
3945}
3946
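/* Usage sketch (hypothetical): how a request builder typically calls
 * hci_update_random_address() before queueing a command that depends
 * on the own address type. The follow-up command is elided.
 */
static void example_pick_own_address(struct hci_request *req)
{
	u8 own_addr_type;

	/* Privacy not strictly required, e.g. passive scanning */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	/* own_addr_type now holds ADDR_LE_DEV_PUBLIC or
	 * ADDR_LE_DEV_RANDOM, and any needed
	 * HCI_OP_LE_SET_RANDOM_ADDR command is queued on the request.
	 */
}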
a1f4c318
JH
3947/* Copy the Identity Address of the controller.
3948 *
3949 * If the controller has a public BD_ADDR, then by default use that one.
3950 * If this is a LE only controller without a public address, default to
3951 * the static random address.
3952 *
3953 * For debugging purposes it is possible to force controllers with a
3954 * public address to use the static random address instead.
3955 */
3956void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3957 u8 *bdaddr_type)
3958{
111902f7 3959 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3960 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3961 bacpy(bdaddr, &hdev->static_addr);
3962 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3963 } else {
3964 bacpy(bdaddr, &hdev->bdaddr);
3965 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3966 }
3967}
3968
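/* Usage sketch (hypothetical): reading back the identity address,
 * e.g. for debug logging with the %pMR format used in this file.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("identity %pMR (type %u)", &bdaddr, bdaddr_type);
}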
9be0dab7
DH
3969/* Alloc HCI device */
3970struct hci_dev *hci_alloc_dev(void)
3971{
3972 struct hci_dev *hdev;
3973
27f70f3e 3974 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3975 if (!hdev)
3976 return NULL;
3977
b1b813d4
DH
3978 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3979 hdev->esco_type = (ESCO_HV1);
3980 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3981 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3982 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3983 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3984 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3985 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3986
b1b813d4
DH
3987 hdev->sniff_max_interval = 800;
3988 hdev->sniff_min_interval = 80;
3989
3f959d46 3990 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3991 hdev->le_adv_min_interval = 0x0800;
3992 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3993 hdev->le_scan_interval = 0x0060;
3994 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3995 hdev->le_conn_min_interval = 0x0028;
3996 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3997 hdev->le_conn_latency = 0x0000;
3998 hdev->le_supv_timeout = 0x002a;
bef64738 3999
d6bfd59c 4000 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 4001 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
4002 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4003 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4004
b1b813d4
DH
4005 mutex_init(&hdev->lock);
4006 mutex_init(&hdev->req_lock);
4007
4008 INIT_LIST_HEAD(&hdev->mgmt_pending);
4009 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4010 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4011 INIT_LIST_HEAD(&hdev->uuids);
4012 INIT_LIST_HEAD(&hdev->link_keys);
4013 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4014 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4015 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4016 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4017 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4018 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4019 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4020 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4021
4022 INIT_WORK(&hdev->rx_work, hci_rx_work);
4023 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4024 INIT_WORK(&hdev->tx_work, hci_tx_work);
4025 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4026
b1b813d4
DH
4027 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4028 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4029 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4030
b1b813d4
DH
4031 skb_queue_head_init(&hdev->rx_q);
4032 skb_queue_head_init(&hdev->cmd_q);
4033 skb_queue_head_init(&hdev->raw_q);
4034
4035 init_waitqueue_head(&hdev->req_wait_q);
4036
65cc2b49 4037 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4038
b1b813d4
DH
4039 hci_init_sysfs(hdev);
4040 discovery_init(hdev);
9be0dab7
DH
4041
4042 return hdev;
4043}
4044EXPORT_SYMBOL(hci_alloc_dev);
4045
4046/* Free HCI device */
4047void hci_free_dev(struct hci_dev *hdev)
4048{
9be0dab7
DH
4049 /* will free via device release */
4050 put_device(&hdev->dev);
4051}
4052EXPORT_SYMBOL(hci_free_dev);
4053
1da177e4
LT
4054/* Register HCI device */
4055int hci_register_dev(struct hci_dev *hdev)
4056{
b1b813d4 4057 int id, error;
1da177e4 4058
74292d5a 4059 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4060 return -EINVAL;
4061
08add513
MM
4062 /* Do not allow HCI_AMP devices to register at index 0,
4063 * so the index can be used as the AMP controller ID.
4064 */
3df92b31
SL
4065 switch (hdev->dev_type) {
4066 case HCI_BREDR:
4067 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4068 break;
4069 case HCI_AMP:
4070 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4071 break;
4072 default:
4073 return -EINVAL;
1da177e4 4074 }
8e87d142 4075
3df92b31
SL
4076 if (id < 0)
4077 return id;
4078
1da177e4
LT
4079 sprintf(hdev->name, "hci%d", id);
4080 hdev->id = id;
2d8b3a11
AE
4081
4082 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4083
d8537548
KC
4084 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4085 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4086 if (!hdev->workqueue) {
4087 error = -ENOMEM;
4088 goto err;
4089 }
f48fd9c8 4090
d8537548
KC
4091 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4092 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4093 if (!hdev->req_workqueue) {
4094 destroy_workqueue(hdev->workqueue);
4095 error = -ENOMEM;
4096 goto err;
4097 }
4098
0153e2ec
MH
4099 if (!IS_ERR_OR_NULL(bt_debugfs))
4100 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4101
bdc3e0f1
MH
4102 dev_set_name(&hdev->dev, "%s", hdev->name);
4103
4104 error = device_add(&hdev->dev);
33ca954d 4105 if (error < 0)
54506918 4106 goto err_wqueue;
1da177e4 4107
611b30f7 4108 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4109 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4110 hdev);
611b30f7
MH
4111 if (hdev->rfkill) {
4112 if (rfkill_register(hdev->rfkill) < 0) {
4113 rfkill_destroy(hdev->rfkill);
4114 hdev->rfkill = NULL;
4115 }
4116 }
4117
5e130367
JH
4118 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4119 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4120
a8b2d5c2 4121 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4122 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4123
01cd3404 4124 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4125 /* Assume BR/EDR support until proven otherwise (such as
4126 * through reading supported features during init).
4127 */
4128 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4129 }
ce2be9ac 4130
fcee3377
GP
4131 write_lock(&hci_dev_list_lock);
4132 list_add(&hdev->list, &hci_dev_list);
4133 write_unlock(&hci_dev_list_lock);
4134
4a964404
MH
4135 /* Devices that are marked for raw-only usage are unconfigured
4136 * and should not be included in normal operation.
fee746b0
MH
4137 */
4138 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4139 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4140
1da177e4 4141 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4142 hci_dev_hold(hdev);
1da177e4 4143
19202573 4144 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4145
1da177e4 4146 return id;
f48fd9c8 4147
33ca954d
DH
4148err_wqueue:
4149 destroy_workqueue(hdev->workqueue);
6ead1bbc 4150 destroy_workqueue(hdev->req_workqueue);
33ca954d 4151err:
3df92b31 4152 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4153
33ca954d 4154 return error;
1da177e4
LT
4155}
4156EXPORT_SYMBOL(hci_register_dev);
4157
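/* Driver-side sketch (hypothetical): the minimal alloc/register
 * pairing. hci_register_dev() above rejects a device without open,
 * close and send callbacks; these stubs exist only for illustration.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would hand the skb to hardware */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}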
4158/* Unregister HCI device */
59735631 4159void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4160{
3df92b31 4161 int i, id;
ef222013 4162
c13854ce 4163 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4164
94324962
JH
4165 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4166
3df92b31
SL
4167 id = hdev->id;
4168
f20d09d5 4169 write_lock(&hci_dev_list_lock);
1da177e4 4170 list_del(&hdev->list);
f20d09d5 4171 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4172
4173 hci_dev_do_close(hdev);
4174
cd4c5391 4175 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4176 kfree_skb(hdev->reassembly[i]);
4177
b9b5ef18
GP
4178 cancel_work_sync(&hdev->power_on);
4179
ab81cbf9 4180 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4181 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4182 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4183 hci_dev_lock(hdev);
744cf19e 4184 mgmt_index_removed(hdev);
09fd0de5 4185 hci_dev_unlock(hdev);
56e5cb86 4186 }
ab81cbf9 4187
2e58ef3e
JH
4188 /* mgmt_index_removed should take care of emptying the
4189 * pending list */
4190 BUG_ON(!list_empty(&hdev->mgmt_pending));
4191
1da177e4
LT
4192 hci_notify(hdev, HCI_DEV_UNREG);
4193
611b30f7
MH
4194 if (hdev->rfkill) {
4195 rfkill_unregister(hdev->rfkill);
4196 rfkill_destroy(hdev->rfkill);
4197 }
4198
711eafe3 4199 smp_unregister(hdev);
99780a7b 4200
bdc3e0f1 4201 device_del(&hdev->dev);
147e2d59 4202
0153e2ec
MH
4203 debugfs_remove_recursive(hdev->debugfs);
4204
f48fd9c8 4205 destroy_workqueue(hdev->workqueue);
6ead1bbc 4206 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4207
09fd0de5 4208 hci_dev_lock(hdev);
dcc36c16 4209 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4210 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4211 hci_uuids_clear(hdev);
55ed8ca1 4212 hci_link_keys_clear(hdev);
b899efaf 4213 hci_smp_ltks_clear(hdev);
970c4e46 4214 hci_smp_irks_clear(hdev);
2763eda6 4215 hci_remote_oob_data_clear(hdev);
dcc36c16 4216 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4217 hci_conn_params_clear_all(hdev);
09fd0de5 4218 hci_dev_unlock(hdev);
e2e0cacb 4219
dc946bd8 4220 hci_dev_put(hdev);
3df92b31
SL
4221
4222 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4223}
4224EXPORT_SYMBOL(hci_unregister_dev);
4225
4226/* Suspend HCI device */
4227int hci_suspend_dev(struct hci_dev *hdev)
4228{
4229 hci_notify(hdev, HCI_DEV_SUSPEND);
4230 return 0;
4231}
4232EXPORT_SYMBOL(hci_suspend_dev);
4233
4234/* Resume HCI device */
4235int hci_resume_dev(struct hci_dev *hdev)
4236{
4237 hci_notify(hdev, HCI_DEV_RESUME);
4238 return 0;
4239}
4240EXPORT_SYMBOL(hci_resume_dev);
4241
76bca880 4242/* Receive frame from HCI drivers */
e1a26170 4243int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4244{
76bca880 4245 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4246 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4247 kfree_skb(skb);
4248 return -ENXIO;
4249 }
4250
d82603c6 4251 /* Incoming skb */
76bca880
MH
4252 bt_cb(skb)->incoming = 1;
4253
4254 /* Time stamp */
4255 __net_timestamp(skb);
4256
76bca880 4257 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4258 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4259
76bca880
MH
4260 return 0;
4261}
4262EXPORT_SYMBOL(hci_recv_frame);
4263
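/* Driver-side sketch (hypothetical): wrapping a complete packet
 * received from the transport in an skb and handing it to the core.
 * The buffer is assumed to hold exactly one event packet.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}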
33e882a5 4264static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4265 int count, __u8 index)
33e882a5
SS
4266{
4267 int len = 0;
4268 int hlen = 0;
4269 int remain = count;
4270 struct sk_buff *skb;
4271 struct bt_skb_cb *scb;
4272
4273 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4274 index >= NUM_REASSEMBLY)
33e882a5
SS
4275 return -EILSEQ;
4276
4277 skb = hdev->reassembly[index];
4278
4279 if (!skb) {
4280 switch (type) {
4281 case HCI_ACLDATA_PKT:
4282 len = HCI_MAX_FRAME_SIZE;
4283 hlen = HCI_ACL_HDR_SIZE;
4284 break;
4285 case HCI_EVENT_PKT:
4286 len = HCI_MAX_EVENT_SIZE;
4287 hlen = HCI_EVENT_HDR_SIZE;
4288 break;
4289 case HCI_SCODATA_PKT:
4290 len = HCI_MAX_SCO_SIZE;
4291 hlen = HCI_SCO_HDR_SIZE;
4292 break;
4293 }
4294
1e429f38 4295 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4296 if (!skb)
4297 return -ENOMEM;
4298
4299 scb = (void *) skb->cb;
4300 scb->expect = hlen;
4301 scb->pkt_type = type;
4302
33e882a5
SS
4303 hdev->reassembly[index] = skb;
4304 }
4305
4306 while (count) {
4307 scb = (void *) skb->cb;
89bb46d0 4308 len = min_t(uint, scb->expect, count);
33e882a5
SS
4309
4310 memcpy(skb_put(skb, len), data, len);
4311
4312 count -= len;
4313 data += len;
4314 scb->expect -= len;
4315 remain = count;
4316
4317 switch (type) {
4318 case HCI_EVENT_PKT:
4319 if (skb->len == HCI_EVENT_HDR_SIZE) {
4320 struct hci_event_hdr *h = hci_event_hdr(skb);
4321 scb->expect = h->plen;
4322
4323 if (skb_tailroom(skb) < scb->expect) {
4324 kfree_skb(skb);
4325 hdev->reassembly[index] = NULL;
4326 return -ENOMEM;
4327 }
4328 }
4329 break;
4330
4331 case HCI_ACLDATA_PKT:
4332 if (skb->len == HCI_ACL_HDR_SIZE) {
4333 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4334 scb->expect = __le16_to_cpu(h->dlen);
4335
4336 if (skb_tailroom(skb) < scb->expect) {
4337 kfree_skb(skb);
4338 hdev->reassembly[index] = NULL;
4339 return -ENOMEM;
4340 }
4341 }
4342 break;
4343
4344 case HCI_SCODATA_PKT:
4345 if (skb->len == HCI_SCO_HDR_SIZE) {
4346 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4347 scb->expect = h->dlen;
4348
4349 if (skb_tailroom(skb) < scb->expect) {
4350 kfree_skb(skb);
4351 hdev->reassembly[index] = NULL;
4352 return -ENOMEM;
4353 }
4354 }
4355 break;
4356 }
4357
4358 if (scb->expect == 0) {
4359 /* Complete frame */
4360
4361 bt_cb(skb)->pkt_type = type;
e1a26170 4362 hci_recv_frame(hdev, skb);
33e882a5
SS
4363
4364 hdev->reassembly[index] = NULL;
4365 return remain;
4366 }
4367 }
4368
4369 return remain;
4370}
4371
ef222013
MH
4372int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4373{
f39a3c06
SS
4374 int rem = 0;
4375
ef222013
MH
4376 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4377 return -EILSEQ;
4378
da5f6c37 4379 while (count) {
1e429f38 4380 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4381 if (rem < 0)
4382 return rem;
ef222013 4383
f39a3c06
SS
4384 data += (count - rem);
4385 count = rem;
f81c6224 4386 }
ef222013 4387
f39a3c06 4388 return rem;
ef222013
MH
4389}
4390EXPORT_SYMBOL(hci_recv_fragment);
4391
99811510
SS
4392#define STREAM_REASSEMBLY 0
4393
4394int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4395{
4396 int type;
4397 int rem = 0;
4398
da5f6c37 4399 while (count) {
99811510
SS
4400 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4401
4402 if (!skb) {
4403 struct { char type; } *pkt;
4404
4405 /* Start of the frame */
4406 pkt = data;
4407 type = pkt->type;
4408
4409 data++;
4410 count--;
4411 } else
4412 type = bt_cb(skb)->pkt_type;
4413
1e429f38 4414 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4415 STREAM_REASSEMBLY);
99811510
SS
4416 if (rem < 0)
4417 return rem;
4418
4419 data += (count - rem);
4420 count = rem;
f81c6224 4421 }
99811510
SS
4422
4423 return rem;
4424}
4425EXPORT_SYMBOL(hci_recv_stream_fragment);
4426
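/* Driver-side sketch (hypothetical): a UART-style transport with no
 * packet boundaries can push raw bytes and let the core reassemble
 * them; buf/count would come from the transport's receive path.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, buf, count);

	if (err < 0)
		BT_ERR("Stream reassembly failed (%d)", err);
}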
1da177e4
LT
4427/* ---- Interface to upper protocols ---- */
4428
1da177e4
LT
4429int hci_register_cb(struct hci_cb *cb)
4430{
4431 BT_DBG("%p name %s", cb, cb->name);
4432
f20d09d5 4433 write_lock(&hci_cb_list_lock);
1da177e4 4434 list_add(&cb->list, &hci_cb_list);
f20d09d5 4435 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4436
4437 return 0;
4438}
4439EXPORT_SYMBOL(hci_register_cb);
4440
4441int hci_unregister_cb(struct hci_cb *cb)
4442{
4443 BT_DBG("%p name %s", cb, cb->name);
4444
f20d09d5 4445 write_lock(&hci_cb_list_lock);
1da177e4 4446 list_del(&cb->list);
f20d09d5 4447 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4448
4449 return 0;
4450}
4451EXPORT_SYMBOL(hci_unregister_cb);
4452
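/* Usage sketch (hypothetical): upper protocols hook into the core
 * with a struct hci_cb. Only the name is filled in here; a real user
 * would also set the event callbacks it needs.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_cb_init(void)
{
	return hci_register_cb(&example_cb);
}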
51086991 4453static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4454{
cdc52faa
MH
4455 int err;
4456
0d48d939 4457 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4458
cd82e61c
MH
4459 /* Time stamp */
4460 __net_timestamp(skb);
1da177e4 4461
cd82e61c
MH
4462 /* Send copy to monitor */
4463 hci_send_to_monitor(hdev, skb);
4464
4465 if (atomic_read(&hdev->promisc)) {
4466 /* Send copy to the sockets */
470fe1b5 4467 hci_send_to_sock(hdev, skb);
1da177e4
LT
4468 }
4469
4470 /* Get rid of skb owner, prior to sending to the driver. */
4471 skb_orphan(skb);
4472
cdc52faa
MH
4473 err = hdev->send(hdev, skb);
4474 if (err < 0) {
4475 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4476 kfree_skb(skb);
4477 }
1da177e4
LT
4478}
4479
3119ae95
JH
4480void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4481{
4482 skb_queue_head_init(&req->cmd_q);
4483 req->hdev = hdev;
5d73e034 4484 req->err = 0;
3119ae95
JH
4485}
4486
4487int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4488{
4489 struct hci_dev *hdev = req->hdev;
4490 struct sk_buff *skb;
4491 unsigned long flags;
4492
4493 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4494
5d73e034
AG
4495 /* If an error occurred during request building, remove all HCI
4496 * commands queued on the HCI request queue.
4497 */
4498 if (req->err) {
4499 skb_queue_purge(&req->cmd_q);
4500 return req->err;
4501 }
4502
3119ae95
JH
4503 /* Do not allow empty requests */
4504 if (skb_queue_empty(&req->cmd_q))
382b0c39 4505 return -ENODATA;
3119ae95
JH
4506
4507 skb = skb_peek_tail(&req->cmd_q);
4508 bt_cb(skb)->req.complete = complete;
4509
4510 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4511 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4512 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4513
4514 queue_work(hdev->workqueue, &hdev->cmd_work);
4515
4516 return 0;
4517}
4518
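/* Usage sketch (hypothetical): the request API batches one or more
 * commands and submits them as a unit. This mirrors the pattern used
 * throughout this file, here with the scan-disable helper defined
 * further below.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("Example request failed: status 0x%2.2x", status);
}

static int example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	return hci_req_run(&req, example_complete);
}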
899de765
MH
4519bool hci_req_pending(struct hci_dev *hdev)
4520{
4521 return (hdev->req_status == HCI_REQ_PEND);
4522}
4523
1ca3a9d0 4524static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4525 u32 plen, const void *param)
1da177e4
LT
4526{
4527 int len = HCI_COMMAND_HDR_SIZE + plen;
4528 struct hci_command_hdr *hdr;
4529 struct sk_buff *skb;
4530
1da177e4 4531 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4532 if (!skb)
4533 return NULL;
1da177e4
LT
4534
4535 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4536 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4537 hdr->plen = plen;
4538
4539 if (plen)
4540 memcpy(skb_put(skb, plen), param, plen);
4541
4542 BT_DBG("skb len %d", skb->len);
4543
0d48d939 4544 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4545
1ca3a9d0
JH
4546 return skb;
4547}
4548
4549/* Send HCI command */
07dc93dd
JH
4550int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4551 const void *param)
1ca3a9d0
JH
4552{
4553 struct sk_buff *skb;
4554
4555 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4556
4557 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4558 if (!skb) {
4559 BT_ERR("%s no memory for command", hdev->name);
4560 return -ENOMEM;
4561 }
4562
11714b3d
JH
4563 /* Stand-alone HCI commands must be flagged as
4564 * single-command requests.
4565 */
4566 bt_cb(skb)->req.start = true;
4567
1da177e4 4568 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4569 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4570
4571 return 0;
4572}
1da177e4 4573
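/* Usage sketch (hypothetical): sending a single stand-alone command.
 * HCI_OP_RESET carries no parameters, so plen is 0 and param is NULL.
 */
static int example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}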
71c76a17 4574/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4575void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4576 const void *param, u8 event)
71c76a17
JH
4577{
4578 struct hci_dev *hdev = req->hdev;
4579 struct sk_buff *skb;
4580
4581 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4582
34739c1e
AG
4583 /* If an error occurred during request building, there is no point in
4584 * queueing the HCI command. We can simply return.
4585 */
4586 if (req->err)
4587 return;
4588
71c76a17
JH
4589 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4590 if (!skb) {
5d73e034
AG
4591 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4592 hdev->name, opcode);
4593 req->err = -ENOMEM;
e348fe6b 4594 return;
71c76a17
JH
4595 }
4596
4597 if (skb_queue_empty(&req->cmd_q))
4598 bt_cb(skb)->req.start = true;
4599
02350a72
JH
4600 bt_cb(skb)->req.event = event;
4601
71c76a17 4602 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4603}
4604
07dc93dd
JH
4605void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4606 const void *param)
02350a72
JH
4607{
4608 hci_req_add_ev(req, opcode, plen, param, 0);
4609}
4610
1da177e4 4611/* Get data from the previously sent command */
a9de9248 4612void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4613{
4614 struct hci_command_hdr *hdr;
4615
4616 if (!hdev->sent_cmd)
4617 return NULL;
4618
4619 hdr = (void *) hdev->sent_cmd->data;
4620
a9de9248 4621 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4622 return NULL;
4623
f0e09510 4624 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4625
4626 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4627}
4628
4629/* Send ACL data */
4630static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4631{
4632 struct hci_acl_hdr *hdr;
4633 int len = skb->len;
4634
badff6d0
ACM
4635 skb_push(skb, HCI_ACL_HDR_SIZE);
4636 skb_reset_transport_header(skb);
9c70220b 4637 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4638 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4639 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4640}
4641
ee22be7e 4642static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4643 struct sk_buff *skb, __u16 flags)
1da177e4 4644{
ee22be7e 4645 struct hci_conn *conn = chan->conn;
1da177e4
LT
4646 struct hci_dev *hdev = conn->hdev;
4647 struct sk_buff *list;
4648
087bfd99
GP
4649 skb->len = skb_headlen(skb);
4650 skb->data_len = 0;
4651
4652 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4653
4654 switch (hdev->dev_type) {
4655 case HCI_BREDR:
4656 hci_add_acl_hdr(skb, conn->handle, flags);
4657 break;
4658 case HCI_AMP:
4659 hci_add_acl_hdr(skb, chan->handle, flags);
4660 break;
4661 default:
4662 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4663 return;
4664 }
087bfd99 4665
70f23020
AE
4666 list = skb_shinfo(skb)->frag_list;
4667 if (!list) {
1da177e4
LT
4668 /* Non fragmented */
4669 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4670
73d80deb 4671 skb_queue_tail(queue, skb);
1da177e4
LT
4672 } else {
4673 /* Fragmented */
4674 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4675
4676 skb_shinfo(skb)->frag_list = NULL;
4677
4678 /* Queue all fragments atomically */
af3e6359 4679 spin_lock(&queue->lock);
1da177e4 4680
73d80deb 4681 __skb_queue_tail(queue, skb);
e702112f
AE
4682
4683 flags &= ~ACL_START;
4684 flags |= ACL_CONT;
1da177e4
LT
4685 do {
4686 skb = list; list = list->next;
8e87d142 4687
0d48d939 4688 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4689 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4690
4691 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4692
73d80deb 4693 __skb_queue_tail(queue, skb);
1da177e4
LT
4694 } while (list);
4695
af3e6359 4696 spin_unlock(&queue->lock);
1da177e4 4697 }
73d80deb
LAD
4698}
4699
4700void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4701{
ee22be7e 4702 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4703
f0e09510 4704 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4705
ee22be7e 4706 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4707
3eff45ea 4708 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4709}
1da177e4
LT
4710
4711/* Send SCO data */
0d861d8b 4712void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4713{
4714 struct hci_dev *hdev = conn->hdev;
4715 struct hci_sco_hdr hdr;
4716
4717 BT_DBG("%s len %d", hdev->name, skb->len);
4718
aca3192c 4719 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4720 hdr.dlen = skb->len;
4721
badff6d0
ACM
4722 skb_push(skb, HCI_SCO_HDR_SIZE);
4723 skb_reset_transport_header(skb);
9c70220b 4724 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4725
0d48d939 4726 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4727
1da177e4 4728 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4729 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4730}
1da177e4
LT
4731
4732/* ---- HCI TX task (outgoing data) ---- */
4733
4734/* HCI Connection scheduler */
6039aa73
GP
4735static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4736 int *quote)
1da177e4
LT
4737{
4738 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4739 struct hci_conn *conn = NULL, *c;
abc5de8f 4740 unsigned int num = 0, min = ~0;
1da177e4 4741
8e87d142 4742 /* We don't have to lock device here. Connections are always
1da177e4 4743 * added and removed with TX task disabled. */
bf4c6325
GP
4744
4745 rcu_read_lock();
4746
4747 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4748 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4749 continue;
769be974
MH
4750
4751 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4752 continue;
4753
1da177e4
LT
4754 num++;
4755
4756 if (c->sent < min) {
4757 min = c->sent;
4758 conn = c;
4759 }
52087a79
LAD
4760
4761 if (hci_conn_num(hdev, type) == num)
4762 break;
1da177e4
LT
4763 }
4764
bf4c6325
GP
4765 rcu_read_unlock();
4766
1da177e4 4767 if (conn) {
6ed58ec5
VT
4768 int cnt, q;
4769
4770 switch (conn->type) {
4771 case ACL_LINK:
4772 cnt = hdev->acl_cnt;
4773 break;
4774 case SCO_LINK:
4775 case ESCO_LINK:
4776 cnt = hdev->sco_cnt;
4777 break;
4778 case LE_LINK:
4779 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4780 break;
4781 default:
4782 cnt = 0;
4783 BT_ERR("Unknown link type");
4784 }
4785
4786 q = cnt / num;
1da177e4
LT
4787 *quote = q ? q : 1;
4788 } else
4789 *quote = 0;
4790
4791 BT_DBG("conn %p quote %d", conn, *quote);
4792 return conn;
4793}
4794
6039aa73 4795static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4796{
4797 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4798 struct hci_conn *c;
1da177e4 4799
bae1f5d9 4800 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4801
bf4c6325
GP
4802 rcu_read_lock();
4803
1da177e4 4804 /* Kill stalled connections */
bf4c6325 4805 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4806 if (c->type == type && c->sent) {
6ed93dc6
AE
4807 BT_ERR("%s killing stalled connection %pMR",
4808 hdev->name, &c->dst);
bed71748 4809 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4810 }
4811 }
bf4c6325
GP
4812
4813 rcu_read_unlock();
1da177e4
LT
4814}
4815
6039aa73
GP
4816static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4817 int *quote)
1da177e4 4818{
73d80deb
LAD
4819 struct hci_conn_hash *h = &hdev->conn_hash;
4820 struct hci_chan *chan = NULL;
abc5de8f 4821 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4822 struct hci_conn *conn;
73d80deb
LAD
4823 int cnt, q, conn_num = 0;
4824
4825 BT_DBG("%s", hdev->name);
4826
bf4c6325
GP
4827 rcu_read_lock();
4828
4829 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4830 struct hci_chan *tmp;
4831
4832 if (conn->type != type)
4833 continue;
4834
4835 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4836 continue;
4837
4838 conn_num++;
4839
8192edef 4840 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4841 struct sk_buff *skb;
4842
4843 if (skb_queue_empty(&tmp->data_q))
4844 continue;
4845
4846 skb = skb_peek(&tmp->data_q);
4847 if (skb->priority < cur_prio)
4848 continue;
4849
4850 if (skb->priority > cur_prio) {
4851 num = 0;
4852 min = ~0;
4853 cur_prio = skb->priority;
4854 }
4855
4856 num++;
4857
4858 if (conn->sent < min) {
4859 min = conn->sent;
4860 chan = tmp;
4861 }
4862 }
4863
4864 if (hci_conn_num(hdev, type) == conn_num)
4865 break;
4866 }
4867
bf4c6325
GP
4868 rcu_read_unlock();
4869
73d80deb
LAD
4870 if (!chan)
4871 return NULL;
4872
4873 switch (chan->conn->type) {
4874 case ACL_LINK:
4875 cnt = hdev->acl_cnt;
4876 break;
bd1eb66b
AE
4877 case AMP_LINK:
4878 cnt = hdev->block_cnt;
4879 break;
73d80deb
LAD
4880 case SCO_LINK:
4881 case ESCO_LINK:
4882 cnt = hdev->sco_cnt;
4883 break;
4884 case LE_LINK:
4885 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4886 break;
4887 default:
4888 cnt = 0;
4889 BT_ERR("Unknown link type");
4890 }
4891
4892 q = cnt / num;
4893 *quote = q ? q : 1;
4894 BT_DBG("chan %p quote %d", chan, *quote);
4895 return chan;
4896}
4897
02b20f0b
LAD
4898static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4899{
4900 struct hci_conn_hash *h = &hdev->conn_hash;
4901 struct hci_conn *conn;
4902 int num = 0;
4903
4904 BT_DBG("%s", hdev->name);
4905
bf4c6325
GP
4906 rcu_read_lock();
4907
4908 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4909 struct hci_chan *chan;
4910
4911 if (conn->type != type)
4912 continue;
4913
4914 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4915 continue;
4916
4917 num++;
4918
8192edef 4919 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4920 struct sk_buff *skb;
4921
4922 if (chan->sent) {
4923 chan->sent = 0;
4924 continue;
4925 }
4926
4927 if (skb_queue_empty(&chan->data_q))
4928 continue;
4929
4930 skb = skb_peek(&chan->data_q);
4931 if (skb->priority >= HCI_PRIO_MAX - 1)
4932 continue;
4933
4934 skb->priority = HCI_PRIO_MAX - 1;
4935
4936 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4937 skb->priority);
02b20f0b
LAD
4938 }
4939
4940 if (hci_conn_num(hdev, type) == num)
4941 break;
4942 }
bf4c6325
GP
4943
4944 rcu_read_unlock();
4945
02b20f0b
LAD
4946}
4947
b71d385a
AE
4948static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4949{
4950 /* Calculate count of blocks used by this packet */
4951 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4952}
4953
6039aa73 4954static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4955{
4a964404 4956 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4957 /* ACL tx timeout must be longer than maximum
4958 * link supervision timeout (40.9 seconds) */
63d2bc1b 4959 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4960 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4961 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4962 }
63d2bc1b 4963}
1da177e4 4964
6039aa73 4965static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4966{
4967 unsigned int cnt = hdev->acl_cnt;
4968 struct hci_chan *chan;
4969 struct sk_buff *skb;
4970 int quote;
4971
4972 __check_timeout(hdev, cnt);
04837f64 4973
73d80deb 4974 while (hdev->acl_cnt &&
a8c5fb1a 4975 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4976 u32 priority = (skb_peek(&chan->data_q))->priority;
4977 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4978 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4979 skb->len, skb->priority);
73d80deb 4980
ec1cce24
LAD
4981 /* Stop if priority has changed */
4982 if (skb->priority < priority)
4983 break;
4984
4985 skb = skb_dequeue(&chan->data_q);
4986
73d80deb 4987 hci_conn_enter_active_mode(chan->conn,
04124681 4988 bt_cb(skb)->force_active);
04837f64 4989
57d17d70 4990 hci_send_frame(hdev, skb);
1da177e4
LT
4991 hdev->acl_last_tx = jiffies;
4992
4993 hdev->acl_cnt--;
73d80deb
LAD
4994 chan->sent++;
4995 chan->conn->sent++;
1da177e4
LT
4996 }
4997 }
02b20f0b
LAD
4998
4999 if (cnt != hdev->acl_cnt)
5000 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5001}
5002
6039aa73 5003static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5004{
63d2bc1b 5005 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5006 struct hci_chan *chan;
5007 struct sk_buff *skb;
5008 int quote;
bd1eb66b 5009 u8 type;
b71d385a 5010
63d2bc1b 5011 __check_timeout(hdev, cnt);
b71d385a 5012
bd1eb66b
AE
5013 BT_DBG("%s", hdev->name);
5014
5015 if (hdev->dev_type == HCI_AMP)
5016 type = AMP_LINK;
5017 else
5018 type = ACL_LINK;
5019
b71d385a 5020 while (hdev->block_cnt > 0 &&
bd1eb66b 5021 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5022 u32 priority = (skb_peek(&chan->data_q))->priority;
5023 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5024 int blocks;
5025
5026 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5027 skb->len, skb->priority);
b71d385a
AE
5028
5029 /* Stop if priority has changed */
5030 if (skb->priority < priority)
5031 break;
5032
5033 skb = skb_dequeue(&chan->data_q);
5034
5035 blocks = __get_blocks(hdev, skb);
5036 if (blocks > hdev->block_cnt)
5037 return;
5038
5039 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5040 bt_cb(skb)->force_active);
b71d385a 5041
57d17d70 5042 hci_send_frame(hdev, skb);
b71d385a
AE
5043 hdev->acl_last_tx = jiffies;
5044
5045 hdev->block_cnt -= blocks;
5046 quote -= blocks;
5047
5048 chan->sent += blocks;
5049 chan->conn->sent += blocks;
5050 }
5051 }
5052
5053 if (cnt != hdev->block_cnt)
bd1eb66b 5054 hci_prio_recalculate(hdev, type);
b71d385a
AE
5055}
5056
6039aa73 5057static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5058{
5059 BT_DBG("%s", hdev->name);
5060
bd1eb66b
AE
5061 /* No ACL link over BR/EDR controller */
5062 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5063 return;
5064
5065 /* No AMP link over AMP controller */
5066 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5067 return;
5068
5069 switch (hdev->flow_ctl_mode) {
5070 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5071 hci_sched_acl_pkt(hdev);
5072 break;
5073
5074 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5075 hci_sched_acl_blk(hdev);
5076 break;
5077 }
5078}
5079
1da177e4 5080/* Schedule SCO */
6039aa73 5081static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5082{
5083 struct hci_conn *conn;
5084 struct sk_buff *skb;
5085 int quote;
5086
5087 BT_DBG("%s", hdev->name);
5088
52087a79
LAD
5089 if (!hci_conn_num(hdev, SCO_LINK))
5090 return;
5091
1da177e4
LT
5092 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5093 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5094 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5095 hci_send_frame(hdev, skb);
1da177e4
LT
5096
5097 conn->sent++;
5098 if (conn->sent == ~0)
5099 conn->sent = 0;
5100 }
5101 }
5102}
5103
6039aa73 5104static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5105{
5106 struct hci_conn *conn;
5107 struct sk_buff *skb;
5108 int quote;
5109
5110 BT_DBG("%s", hdev->name);
5111
52087a79
LAD
5112 if (!hci_conn_num(hdev, ESCO_LINK))
5113 return;
5114
8fc9ced3
GP
5115 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5116 &quote))) {
b6a0dc82
MH
5117 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5118 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5119 hci_send_frame(hdev, skb);
b6a0dc82
MH
5120
5121 conn->sent++;
5122 if (conn->sent == ~0)
5123 conn->sent = 0;
5124 }
5125 }
5126}
5127
6039aa73 5128static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5129{
73d80deb 5130 struct hci_chan *chan;
6ed58ec5 5131 struct sk_buff *skb;
02b20f0b 5132 int quote, cnt, tmp;
6ed58ec5
VT
5133
5134 BT_DBG("%s", hdev->name);
5135
52087a79
LAD
5136 if (!hci_conn_num(hdev, LE_LINK))
5137 return;
5138
4a964404 5139 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5140 /* LE tx timeout must be longer than maximum
5141 * link supervision timeout (40.9 seconds) */
bae1f5d9 5142 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5143 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5144 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5145 }
5146
5147 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5148 tmp = cnt;
73d80deb 5149 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5150 u32 priority = (skb_peek(&chan->data_q))->priority;
5151 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5152 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5153 skb->len, skb->priority);
6ed58ec5 5154
ec1cce24
LAD
5155 /* Stop if priority has changed */
5156 if (skb->priority < priority)
5157 break;
5158
5159 skb = skb_dequeue(&chan->data_q);
5160
57d17d70 5161 hci_send_frame(hdev, skb);
6ed58ec5
VT
5162 hdev->le_last_tx = jiffies;
5163
5164 cnt--;
73d80deb
LAD
5165 chan->sent++;
5166 chan->conn->sent++;
6ed58ec5
VT
5167 }
5168 }
73d80deb 5169
6ed58ec5
VT
5170 if (hdev->le_pkts)
5171 hdev->le_cnt = cnt;
5172 else
5173 hdev->acl_cnt = cnt;
02b20f0b
LAD
5174
5175 if (cnt != tmp)
5176 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5177}
5178
3eff45ea 5179static void hci_tx_work(struct work_struct *work)
1da177e4 5180{
3eff45ea 5181 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5182 struct sk_buff *skb;
5183
6ed58ec5 5184 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5185 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5186
52de599e
MH
5187 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5188 /* Schedule queues and send stuff to HCI driver */
5189 hci_sched_acl(hdev);
5190 hci_sched_sco(hdev);
5191 hci_sched_esco(hdev);
5192 hci_sched_le(hdev);
5193 }
6ed58ec5 5194
1da177e4
LT
5195 /* Send next queued raw (unknown type) packet */
5196 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5197 hci_send_frame(hdev, skb);
1da177e4
LT
5198}
5199
25985edc 5200/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5201
5202/* ACL data packet */
6039aa73 5203static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5204{
5205 struct hci_acl_hdr *hdr = (void *) skb->data;
5206 struct hci_conn *conn;
5207 __u16 handle, flags;
5208
5209 skb_pull(skb, HCI_ACL_HDR_SIZE);
5210
5211 handle = __le16_to_cpu(hdr->handle);
5212 flags = hci_flags(handle);
5213 handle = hci_handle(handle);
5214
f0e09510 5215 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5216 handle, flags);
1da177e4
LT
5217
5218 hdev->stat.acl_rx++;
5219
5220 hci_dev_lock(hdev);
5221 conn = hci_conn_hash_lookup_handle(hdev, handle);
5222 hci_dev_unlock(hdev);
8e87d142 5223
1da177e4 5224 if (conn) {
65983fc7 5225 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5226
1da177e4 5227 /* Send to upper protocol */
686ebf28
UF
5228 l2cap_recv_acldata(conn, skb, flags);
5229 return;
1da177e4 5230 } else {
8e87d142 5231 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5232 hdev->name, handle);
1da177e4
LT
5233 }
5234
5235 kfree_skb(skb);
5236}
5237
5238/* SCO data packet */
6039aa73 5239static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5240{
5241 struct hci_sco_hdr *hdr = (void *) skb->data;
5242 struct hci_conn *conn;
5243 __u16 handle;
5244
5245 skb_pull(skb, HCI_SCO_HDR_SIZE);
5246
5247 handle = __le16_to_cpu(hdr->handle);
5248
f0e09510 5249 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5250
5251 hdev->stat.sco_rx++;
5252
5253 hci_dev_lock(hdev);
5254 conn = hci_conn_hash_lookup_handle(hdev, handle);
5255 hci_dev_unlock(hdev);
5256
5257 if (conn) {
1da177e4 5258 /* Send to upper protocol */
686ebf28
UF
5259 sco_recv_scodata(conn, skb);
5260 return;
1da177e4 5261 } else {
8e87d142 5262 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5263 hdev->name, handle);
1da177e4
LT
5264 }
5265
5266 kfree_skb(skb);
5267}
5268
9238f36a
JH
5269static bool hci_req_is_complete(struct hci_dev *hdev)
5270{
5271 struct sk_buff *skb;
5272
5273 skb = skb_peek(&hdev->cmd_q);
5274 if (!skb)
5275 return true;
5276
5277 return bt_cb(skb)->req.start;
5278}
5279
42c6b129
JH
5280static void hci_resend_last(struct hci_dev *hdev)
5281{
5282 struct hci_command_hdr *sent;
5283 struct sk_buff *skb;
5284 u16 opcode;
5285
5286 if (!hdev->sent_cmd)
5287 return;
5288
5289 sent = (void *) hdev->sent_cmd->data;
5290 opcode = __le16_to_cpu(sent->opcode);
5291 if (opcode == HCI_OP_RESET)
5292 return;
5293
5294 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5295 if (!skb)
5296 return;
5297
5298 skb_queue_head(&hdev->cmd_q, skb);
5299 queue_work(hdev->workqueue, &hdev->cmd_work);
5300}
5301
9238f36a
JH
5302void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5303{
5304 hci_req_complete_t req_complete = NULL;
5305 struct sk_buff *skb;
5306 unsigned long flags;
5307
5308 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5309
42c6b129
JH
5310 /* If the completed command doesn't match the last one that was
5311 * sent we need to do special handling of it.
9238f36a 5312 */
42c6b129
JH
5313 if (!hci_sent_cmd_data(hdev, opcode)) {
5314 /* Some CSR based controllers generate a spontaneous
5315 * reset complete event during init and any pending
5316 * command will never be completed. In such a case we
5317 * need to resend whatever was the last sent
5318 * command.
5319 */
5320 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5321 hci_resend_last(hdev);
5322
9238f36a 5323 return;
42c6b129 5324 }
9238f36a
JH
5325
5326 /* If the command succeeded and there's still more commands in
5327 * this request the request is not yet complete.
5328 */
5329 if (!status && !hci_req_is_complete(hdev))
5330 return;
5331
5332 /* If this was the last command in a request the complete
5333 * callback would be found in hdev->sent_cmd instead of the
5334 * command queue (hdev->cmd_q).
5335 */
5336 if (hdev->sent_cmd) {
5337 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5338
5339 if (req_complete) {
5340 /* We must set the complete callback to NULL to
5341 * avoid calling the callback more than once if
5342 * this function gets called again.
5343 */
5344 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5345
9238f36a 5346 goto call_complete;
53e21fbc 5347 }
9238f36a
JH
5348 }
5349
5350 /* Remove all pending commands belonging to this request */
5351 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5352 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5353 if (bt_cb(skb)->req.start) {
5354 __skb_queue_head(&hdev->cmd_q, skb);
5355 break;
5356 }
5357
5358 req_complete = bt_cb(skb)->req.complete;
5359 kfree_skb(skb);
5360 }
5361 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5362
5363call_complete:
5364 if (req_complete)
5365 req_complete(hdev, status);
5366}
5367
b78752cc 5368static void hci_rx_work(struct work_struct *work)
1da177e4 5369{
b78752cc 5370 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5371 struct sk_buff *skb;
5372
5373 BT_DBG("%s", hdev->name);
5374
1da177e4 5375 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5376 /* Send copy to monitor */
5377 hci_send_to_monitor(hdev, skb);
5378
1da177e4
LT
5379 if (atomic_read(&hdev->promisc)) {
5380 /* Send copy to the sockets */
470fe1b5 5381 hci_send_to_sock(hdev, skb);
1da177e4
LT
5382 }
5383
fee746b0 5384 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5385 kfree_skb(skb);
5386 continue;
5387 }
5388
5389 if (test_bit(HCI_INIT, &hdev->flags)) {
5390 /* Don't process data packets in this states. */
0d48d939 5391 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5392 case HCI_ACLDATA_PKT:
5393 case HCI_SCODATA_PKT:
5394 kfree_skb(skb);
5395 continue;
3ff50b79 5396 }
1da177e4
LT
5397 }
5398
5399 /* Process frame */
0d48d939 5400 switch (bt_cb(skb)->pkt_type) {
1da177e4 5401 case HCI_EVENT_PKT:
b78752cc 5402 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5403 hci_event_packet(hdev, skb);
5404 break;
5405
5406 case HCI_ACLDATA_PKT:
5407 BT_DBG("%s ACL data packet", hdev->name);
5408 hci_acldata_packet(hdev, skb);
5409 break;
5410
5411 case HCI_SCODATA_PKT:
5412 BT_DBG("%s SCO data packet", hdev->name);
5413 hci_scodata_packet(hdev, skb);
5414 break;
5415
5416 default:
5417 kfree_skb(skb);
5418 break;
5419 }
5420 }
1da177e4
LT
5421}
5422
c347b765 5423static void hci_cmd_work(struct work_struct *work)
1da177e4 5424{
c347b765 5425 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5426 struct sk_buff *skb;
5427
2104786b
AE
5428 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5429 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5430
1da177e4 5431 /* Send queued commands */
5a08ecce
AE
5432 if (atomic_read(&hdev->cmd_cnt)) {
5433 skb = skb_dequeue(&hdev->cmd_q);
5434 if (!skb)
5435 return;
5436
7585b97a 5437 kfree_skb(hdev->sent_cmd);
1da177e4 5438
a675d7f1 5439 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5440 if (hdev->sent_cmd) {
1da177e4 5441 atomic_dec(&hdev->cmd_cnt);
57d17d70 5442 hci_send_frame(hdev, skb);
7bdb8a5c 5443 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5444 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5445 else
65cc2b49
MH
5446 schedule_delayed_work(&hdev->cmd_timer,
5447 HCI_CMD_TIMEOUT);
1da177e4
LT
5448 } else {
5449 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5450 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5451 }
5452 }
5453}
b1efcc28
AG
5454
5455void hci_req_add_le_scan_disable(struct hci_request *req)
5456{
5457 struct hci_cp_le_set_scan_enable cp;
5458
5459 memset(&cp, 0, sizeof(cp));
5460 cp.enable = LE_SCAN_DISABLE;
5461 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5462}
a4790dbd 5463
8540f6c0
MH
5464static void add_to_white_list(struct hci_request *req,
5465 struct hci_conn_params *params)
5466{
5467 struct hci_cp_le_add_to_white_list cp;
5468
5469 cp.bdaddr_type = params->addr_type;
5470 bacpy(&cp.bdaddr, &params->addr);
5471
5472 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5473}
5474
5475static u8 update_white_list(struct hci_request *req)
5476{
5477 struct hci_dev *hdev = req->hdev;
5478 struct hci_conn_params *params;
5479 struct bdaddr_list *b;
5480 uint8_t white_list_entries = 0;
5481
5482 /* Go through the current white list programmed into the
5483 * controller one by one and check if that address is still
5484 * in the list of pending connections or list of devices to
5485 * report. If not present in either list, then queue the
5486 * command to remove it from the controller.
5487 */
5488 list_for_each_entry(b, &hdev->le_white_list, list) {
5489 struct hci_cp_le_del_from_white_list cp;
5490
5491 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5492 &b->bdaddr, b->bdaddr_type) ||
5493 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5494 &b->bdaddr, b->bdaddr_type)) {
5495 white_list_entries++;
5496 continue;
5497 }
5498
5499 cp.bdaddr_type = b->bdaddr_type;
5500 bacpy(&cp.bdaddr, &b->bdaddr);
5501
5502 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5503 sizeof(cp), &cp);
5504 }
5505
5506 /* Since all no-longer-valid white list entries have been
5507 * removed, walk through the list of pending connections
5508 * and ensure that any new device gets programmed into
5509 * the controller.
5510 *
5511 * If the list of the devices is larger than the list of
5512 * available white list entries in the controller, then
5513 * just abort and return a filter policy value that does not use the
5514 * white list.
5515 */
5516 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5517 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5518 &params->addr, params->addr_type))
5519 continue;
5520
5521 if (white_list_entries >= hdev->le_white_list_size) {
5522 /* Select filter policy to accept all advertising */
5523 return 0x00;
5524 }
5525
66d8e837
MH
5526 if (hci_find_irk_by_addr(hdev, &params->addr,
5527 params->addr_type)) {
5528 /* White list can not be used with RPAs */
5529 return 0x00;
5530 }
5531
8540f6c0
MH
5532 white_list_entries++;
5533 add_to_white_list(req, params);
5534 }
5535
5536 /* After adding all new pending connections, walk through
5537 * the list of pending reports and also add these to the
5538 * white list if there is still space.
5539 */
5540 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5541 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5542 &params->addr, params->addr_type))
5543 continue;
5544
5545 if (white_list_entries >= hdev->le_white_list_size) {
5546 /* Select filter policy to accept all advertising */
5547 return 0x00;
5548 }
5549
66d8e837
MH
5550 if (hci_find_irk_by_addr(hdev, &params->addr,
5551 params->addr_type)) {
5552 /* White list can not be used with RPAs */
5553 return 0x00;
5554 }
5555
8540f6c0
MH
5556 white_list_entries++;
5557 add_to_white_list(req, params);
5558 }
5559
5560 /* Select filter policy to use white list */
5561 return 0x01;
5562}
5563
8ef30fd3
AG
5564void hci_req_add_le_passive_scan(struct hci_request *req)
5565{
5566 struct hci_cp_le_set_scan_param param_cp;
5567 struct hci_cp_le_set_scan_enable enable_cp;
5568 struct hci_dev *hdev = req->hdev;
5569 u8 own_addr_type;
8540f6c0 5570 u8 filter_policy;
8ef30fd3 5571
6ab535a7
MH
5572 /* Set require_privacy to false since no SCAN_REQ are sent
5573 * during passive scanning. Not using an unresolvable address
5574 * here is important so that peer devices using direct
5575 * advertising with our address will be correctly reported
5576 * by the controller.
8ef30fd3 5577 */
6ab535a7 5578 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5579 return;
5580
8540f6c0
MH
5581 /* Adding or removing entries from the white list must
5582 * happen before enabling scanning. The controller does
5583 * not allow white list modification while scanning.
5584 */
5585 filter_policy = update_white_list(req);
5586
8ef30fd3
AG
5587 memset(&param_cp, 0, sizeof(param_cp));
5588 param_cp.type = LE_SCAN_PASSIVE;
5589 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5590 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5591 param_cp.own_address_type = own_addr_type;
8540f6c0 5592 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5593 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5594 &param_cp);
5595
5596 memset(&enable_cp, 0, sizeof(enable_cp));
5597 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5598 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5599 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5600 &enable_cp);
5601}
5602
a4790dbd
AG
5603static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5604{
5605 if (status)
5606 BT_DBG("HCI request failed to update background scanning: "
5607 "status 0x%2.2x", status);
5608}
5609
5610/* This function controls the background scanning based on hdev->pend_le_conns
5611 * list. If there are pending LE connections we start the background scanning,
5612 * otherwise we stop it.
5613 *
5614 * This function requires the caller holds hdev->lock.
5615 */
5616void hci_update_background_scan(struct hci_dev *hdev)
5617{
a4790dbd
AG
5618 struct hci_request req;
5619 struct hci_conn *conn;
5620 int err;
5621
c20c02d5
MH
5622 if (!test_bit(HCI_UP, &hdev->flags) ||
5623 test_bit(HCI_INIT, &hdev->flags) ||
5624 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5625 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5626 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5627 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5628 return;
5629
a70f4b5f
JH
5630 /* No point in doing scanning if LE support hasn't been enabled */
5631 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5632 return;
5633
ae23ada4
JH
5634 /* If discovery is active don't interfere with it */
5635 if (hdev->discovery.state != DISCOVERY_STOPPED)
5636 return;
5637
a4790dbd
AG
5638 hci_req_init(&req, hdev);
5639
d1d588c1 5640 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5641 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5642 /* If there is no pending LE connections or devices
5643 * to be scanned for, we should stop the background
5644 * scanning.
a4790dbd
AG
5645 */
5646
5647 /* If controller is not scanning we are done. */
5648 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5649 return;
5650
5651 hci_req_add_le_scan_disable(&req);
5652
5653 BT_DBG("%s stopping background scanning", hdev->name);
5654 } else {
a4790dbd
AG
5655 /* If there is at least one pending LE connection, we should
5656 * keep the background scan running.
5657 */
5658
a4790dbd
AG
5659 /* If controller is connecting, we should not start scanning
5660 * since some controllers are not able to scan and connect at
5661 * the same time.
5662 */
5663 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5664 if (conn)
5665 return;
5666
4340a124
AG
5667 /* If controller is currently scanning, we stop it to ensure we
5668 * don't miss any advertising (due to duplicates filter).
5669 */
5670 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5671 hci_req_add_le_scan_disable(&req);
5672
8ef30fd3 5673 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5674
5675 BT_DBG("%s starting background scanning", hdev->name);
5676 }
5677
5678 err = hci_req_run(&req, update_background_scan_complete);
5679 if (err)
5680 BT_ERR("Failed to run HCI request: err %d", err);
5681}
432df05e 5682
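/* Usage sketch (hypothetical): as the comment above notes,
 * hci_update_background_scan() expects hdev->lock to be held.
 */
static void example_resync_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}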
22f433dc
JH
5683static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5684{
5685 struct bdaddr_list *b;
5686
5687 list_for_each_entry(b, &hdev->whitelist, list) {
5688 struct hci_conn *conn;
5689
5690 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5691 if (!conn)
5692 return true;
5693
5694 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5695 return true;
5696 }
5697
5698 return false;
5699}
5700
432df05e
JH
5701void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5702{
5703 u8 scan;
5704
5705 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5706 return;
5707
5708 if (!hdev_is_powered(hdev))
5709 return;
5710
5711 if (mgmt_powering_down(hdev))
5712 return;
5713
5714 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5715 disconnected_whitelist_entries(hdev))
432df05e
JH
5716 scan = SCAN_PAGE;
5717 else
5718 scan = SCAN_DISABLED;
5719
5720 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5721 return;
5722
5723 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5724 scan |= SCAN_INQUIRY;
5725
5726 if (req)
5727 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5728 else
5729 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5730}