Bluetooth: Fix completing SMP as peripheral when no keys are expected
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
970c4e46
JH
38#include "smp.h"
39
b78752cc 40static void hci_rx_work(struct work_struct *work);
c347b765 41static void hci_cmd_work(struct work_struct *work);
3eff45ea 42static void hci_tx_work(struct work_struct *work);
1da177e4 43
1da177e4
LT
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
3df92b31
SL
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
1da177e4
LT
55/* ---- HCI notifications ---- */
56
/* Forward a HCI device event (register/unregister/up/down) to the
 * HCI socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
baf27f6e
MH
62/* ---- HCI debugfs entries ---- */
63
4b4148e9
MH
64static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
/* debugfs write handler: enable or disable Device Under Test mode.
 * Accepts strtobool() input ("0"/"1"/"y"/"n"...). Enabling sends
 * HCI_OP_ENABLE_DUT_MODE; disabling resets the controller, since there
 * is no dedicated "leave DUT mode" command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* The command is only meaningful while the controller is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only flip the flag after the controller accepted the command */
	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
121
/* debugfs "dut_mode" entry: read current state, write to toggle it */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
128
dfb826a8
MH
/* seq_file show handler: dump every valid LMP feature page of the
 * controller (pages 0..max_page) and, on LE capable controllers, the
 * LE feature byte array as well.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

/* Bind the per-device seq_file show handler to the debugfs inode */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
166
70afe0b8
MH
/* seq_file show handler: list all blacklisted addresses of this
 * controller, one "bdaddr (type N)" entry per line.
 */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
191
47219839
MH
/* seq_file show handler: print every registered service UUID, one
 * UUID per line in standard textual form.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
226
baf27f6e
MH
/* seq_file show handler: dump the inquiry (discovery) cache, one
 * remote device per line with its scan parameters, class of device,
 * clock offset, RSSI, SSP mode and entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		/* dev_class is stored little endian; print MSB first */
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
262
02d08d15
MH
/* seq_file show handler: dump all stored BR/EDR link keys as
 * "bdaddr type key pin_len" lines.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
290
babdbb3c
MH
291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
/* debugfs plumbing for the read-only "dev_class" entry */
static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
314
041000b9
MH
/* Read-only debugfs attribute exposing the current voice setting.
 * @data is the struct hci_dev registered with the attribute.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
ebd1e33b
MH
/* Read/write debugfs attribute for the connection authorization
 * auto-accept delay. No range validation is performed here.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353
06f5b778
MH
/* Read/write debugfs attribute for Simple Pairing debug mode.
 * Setting it issues HCI_OP_WRITE_SSP_DEBUG_MODE synchronously and only
 * updates the cached value once the controller reported success.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
5afeac14
MH
/* debugfs read handler: report whether Secure Connections support is
 * being forced, as 'Y'/'N' plus newline.
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler: toggle forced Secure Connections support.
 * Only allowed while the controller is down (the flag influences
 * controller initialization).
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
448
134c2a89
MH
/* debugfs read handler (read-only entry): report whether the
 * controller is in Secure Connections Only mode, as 'Y'/'N'.
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};
466
2bfa3531
MH
/* Read/write debugfs attribute for the connection idle timeout.
 * Accepted values: 0 (disabled) or 500..3600000 — presumably
 * milliseconds; confirm against hci_conn users of idle_timeout.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494
/* Read/write debugfs attribute for the minimum sniff interval.
 * The value must be non-zero, even, and not exceed the currently
 * configured maximum sniff interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
522
/* Read/write debugfs attribute for the maximum sniff interval.
 * The value must be non-zero, even, and not below the currently
 * configured minimum sniff interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
e7b8fc92
MH
/* seq_file show handler: print the configured LE static address */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
573
92202185
MH
/* Read/write debugfs attribute for the LE own address type.
 * Only 0 and 1 are accepted — presumably public vs. random address;
 * confirm against the HCI LE address type definitions.
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
601
8f8625cd
MH
/* seq_file show handler: dump all stored LE long term keys with their
 * address, address type, authentication level, key type, encryption
 * key size, EDIV, random value and key material.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
631
4e70c7e7
MH
/* Read/write debugfs attribute for the default minimum LE connection
 * interval. Valid range is 0x0006-0x0c80 and the value must not exceed
 * the configured maximum connection interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
659
/* Read/write debugfs attribute for the default maximum LE connection
 * interval. Valid range is 0x0006-0x0c80 and the value must not be
 * below the configured minimum connection interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
687
89863109
JR
/* debugfs read handler: report whether 6LoWPAN support is enabled on
 * this controller, as 'Y'/'N' plus newline.
 */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler: toggle the 6LoWPAN enabled flag. Unlike the
 * DUT mode handler this sends no HCI command; it only flips the flag.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};
730
1da177e4
LT
731/* ---- HCI requests ---- */
732
/* Completion callback for synchronous HCI requests: record the result
 * and wake up the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 * Only acts if a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
743
/* Cancel a pending synchronous HCI request: mark it canceled with the
 * given error and wake up the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
754
77a63e0a
FW
/* Retrieve the last received HCI event (hdev->recv_evt) and validate
 * that it matches the request: either a specific event code when
 * @event is non-zero, or a Command Complete for @opcode otherwise.
 *
 * Takes ownership of the skb: on success it is returned with the
 * event (and, for Command Complete, the ev header) pulled off; on any
 * mismatch or short packet it is freed and ERR_PTR(-ENODATA) returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the stored event under the lock so nobody else frees it */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
809
/* Send a single HCI command and sleep until it completes, the request
 * is canceled, a signal arrives, or @timeout (jiffies) expires.
 * @event selects a specific completion event; 0 means Command Complete.
 *
 * Caller must hold hci_req_lock. Returns the completion event skb
 * (see hci_get_cmd_complete()) or an ERR_PTR on failure.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so a fast completion is seen */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with a regular Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
870
/* Execute request and wait for completion.
 *
 * @func builds the request (may add zero or more commands); this
 * helper then runs it and sleeps until hci_req_sync_complete() fires,
 * the request is canceled, a signal arrives, or @timeout (jiffies)
 * expires. Caller must hold hci_req_lock. Returns 0 or a negative
 * errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Must be set before the request runs so an immediate completion
	 * is not lost.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
934
/* Locked variant of __hci_req_sync(): rejects the call if the device
 * is not up, and serializes against other synchronous requests via
 * hci_req_lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
952
/* Request builder: queue an HCI_OP_RESET and flag the device as being
 * reset so event processing can account for it.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
961
/* Stage-one init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads (features, version, address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
975
/* Stage-one init for AMP controllers: block-based flow control plus
 * the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
/* Request builder for init stage one: optional reset, then dispatch to
 * the BR/EDR or AMP specific initial command sequence based on the
 * device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1026
/* Queue the standard BR/EDR setup commands: capability/identity reads,
 * event filter reset, connection accept timeout, and (where supported)
 * page scan parameter reads.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1068
/* Queue the LE capability reads and, for LE-only controllers, mark LE
 * as enabled since there is no host configuration step for them.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
/* Queue a Write Inquiry Mode command with the mode selected by
 * hci_get_inquiry_mode().
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1129
/* Build and queue the Set Event Mask command (and the LE event mask
 * when LE is supported). The mask starts from a BR/EDR default and is
 * widened bit by bit based on the controller's LMP features so that
 * only events the controller can actually generate are unmasked.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally get their own LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
/* Second stage of controller initialization: configure BR/EDR and/or
 * LE basics, the event mask, inquiry mode and link security depending
 * on what the controller's features advertise. @opt is unused (fixed
 * hci_request callback signature).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1274
42c6b129 1275static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1276{
42c6b129 1277 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
42c6b129 1291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1292}
1293
42c6b129 1294static void hci_set_le_support(struct hci_request *req)
2177bab5 1295{
42c6b129 1296 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1297 struct hci_cp_write_le_host_supported cp;
1298
c73eee91
JH
1299 /* LE-only devices do not support explicit enablement */
1300 if (!lmp_bredr_capable(hdev))
1301 return;
1302
2177bab5
JH
1303 memset(&cp, 0, sizeof(cp));
1304
1305 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1306 cp.le = 0x01;
1307 cp.simul = lmp_le_br_capable(hdev);
1308 }
1309
1310 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1311 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1312 &cp);
2177bab5
JH
1313}
1314
d62e6d67
JH
/* Build and queue the Set Event Mask Page 2 command, unmasking the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events when the corresponding LMP features are supported.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1346
/* Third stage of controller initialization: clean stored link keys,
 * set the default link policy, configure the LE own-address type and
 * host support, and read any extended feature pages beyond page 1.
 * @opt is unused (fixed hci_request callback signature).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1403
5d4e7e8d
JH
/* Fourth stage of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 * @opt is unused (fixed hci_request callback signature).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1425
2177bab5
JH
/* Run the staged controller initialization (init1..init4 requests,
 * synchronously) and, during the initial HCI_SETUP phase only, create
 * the controller's debugfs entries. AMP controllers stop after the
 * first stage. Returns 0 on success or a negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
42c6b129 1528static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1529{
1530 __u8 scan = opt;
1531
42c6b129 1532 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1533
1534 /* Inquiry and Page scans */
42c6b129 1535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1536}
1537
42c6b129 1538static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1539{
1540 __u8 auth = opt;
1541
42c6b129 1542 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1543
1544 /* Authentication */
42c6b129 1545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1546}
1547
42c6b129 1548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1549{
1550 __u8 encrypt = opt;
1551
42c6b129 1552 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1553
e4e8e37c 1554 /* Encryption */
42c6b129 1555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1556}
1557
42c6b129 1558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1559{
1560 __le16 policy = cpu_to_le16(opt);
1561
42c6b129 1562 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1563
1564 /* Default link policy */
42c6b129 1565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1566}
1567
8e87d142 1568/* Get HCI device by index.
1da177e4
LT
1569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
8035ded4 1572 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
8035ded4 1580 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
1da177e4
LT
1589
1590/* ---- Inquiry support ---- */
ff9ef578 1591
30dc78e1
JH
1592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
6fbe195d 1596 switch (discov->state) {
343f935b 1597 case DISCOVERY_FINDING:
6fbe195d 1598 case DISCOVERY_RESOLVING:
30dc78e1
JH
1599 return true;
1600
6fbe195d
AG
1601 default:
1602 return false;
1603 }
30dc78e1
JH
1604}
1605
ff9ef578
JH
/* Transition the discovery state machine to @state and emit the
 * corresponding mgmt "discovering" events. A transition to STOPPED
 * only reports "not discovering" if discovery actually got past the
 * STARTING state; entering FINDING reports "discovering".
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op if the state does not change */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1631
1f9b9a5d 1632void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1633{
30883512 1634 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1635 struct inquiry_entry *p, *n;
1da177e4 1636
561aafbc
JH
1637 list_for_each_entry_safe(p, n, &cache->all, all) {
1638 list_del(&p->all);
b57c1a56 1639 kfree(p);
1da177e4 1640 }
561aafbc
JH
1641
1642 INIT_LIST_HEAD(&cache->unknown);
1643 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1644}
1645
a8c5fb1a
GP
1646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
1da177e4 1648{
30883512 1649 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1650 struct inquiry_entry *e;
1651
6ed93dc6 1652 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1653
561aafbc
JH
1654 list_for_each_entry(e, &cache->all, all) {
1655 if (!bacmp(&e->data.bdaddr, bdaddr))
1656 return e;
1657 }
1658
1659 return NULL;
1660}
1661
1662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1663 bdaddr_t *bdaddr)
561aafbc 1664{
30883512 1665 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1666 struct inquiry_entry *e;
1667
6ed93dc6 1668 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1669
1670 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1671 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1672 return e;
1673 }
1674
1675 return NULL;
1da177e4
LT
1676}
1677
30dc78e1 1678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1679 bdaddr_t *bdaddr,
1680 int state)
30dc78e1
JH
1681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
6ed93dc6 1685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
/* Re-insert @ie into the name-resolve list so the list stays sorted
 * by signal strength (smaller |rssi| first), while keeping entries
 * whose name resolution is already pending ahead of it.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove from its current position before re-inserting */
	list_del(&ie->list);

	/* Walk until the first non-pending entry with a weaker or equal
	 * signal; @pos tracks the node to insert after.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1715
/* Insert or refresh an inquiry cache entry for the device described by
 * @data. @name_known tells whether the remote name is already known;
 * if @ssp is non-NULL it is set to whether the device reported SSP
 * support (either now or in an earlier result). Returns true when the
 * entry's name is (now) known, false when it still needs resolving or
 * allocation failed. Caller must hold the hdev lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support seen in an earlier result sticks */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list sorted when the RSSI changes */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown or
	 * resolve list it was linked on (but not while a remote name
	 * request is still pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
30883512 1776 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
561aafbc 1781 list_for_each_entry(e, &cache->all, all) {
1da177e4 1782 struct inquiry_data *data = &e->data;
b57c1a56
JH
1783
1784 if (copied >= num)
1785 break;
1786
1da177e4
LT
1787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
b57c1a56 1793
1da177e4 1794 info++;
b57c1a56 1795 copied++;
1da177e4
LT
1796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
42c6b129 1802static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1805 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
42c6b129 1817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1818}
1819
3e13fa1e
AG
/* Bit-wait action for wait_on_bit(): give up the CPU and report
 * whether a signal is pending so the wait can be interrupted.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
1da177e4
LT
1826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
5a08ecce
AE
1838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
1da177e4
LT
1840 return -ENODEV;
1841
0736cfa8
MH
1842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
5b69bef5
MH
1847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
56f87901
JH
1852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
09fd0de5 1857 hci_dev_lock(hdev);
8e87d142 1858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1860 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1861 do_inquiry = 1;
1862 }
09fd0de5 1863 hci_dev_unlock(hdev);
1da177e4 1864
04837f64 1865 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1866
1867 if (do_inquiry) {
01178cd4
JH
1868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
70f23020
AE
1870 if (err < 0)
1871 goto done;
3e13fa1e
AG
1872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
70f23020 1879 }
1da177e4 1880
8fc9ced3
GP
1881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
1da177e4
LT
1884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
01df8c31 1889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1890 if (!buf) {
1da177e4
LT
1891 err = -ENOMEM;
1892 goto done;
1893 }
1894
09fd0de5 1895 hci_dev_lock(hdev);
1da177e4 1896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1897 hci_dev_unlock(hdev);
1da177e4
LT
1898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1904 ir.num_rsp))
1da177e4 1905 err = -EFAULT;
8e87d142 1906 } else
1da177e4
LT
1907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Power on a controller: validate preconditions (not unregistering,
 * not rfkilled, has a usable address unless still in setup or on the
 * user channel), call the driver open callback, run the driver setup
 * and staged HCI init, and on failure undo everything. Returns 0 on
 * success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and the user channel skip the HCI init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
cbed0ca1
JH
2023/* ---- HCI ioctl helpers ---- */
2024
/* HCIDEVUP ioctl helper: look up the device by index, make sure any
 * pending auto-power-off work cannot race with the open, and bring
 * the device up via hci_dev_do_open(). Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2054
1da177e4
LT
/* Power off a controller: cancel pending work, flush the work items
 * and queues, flush the inquiry/connection state, optionally send an
 * HCI Reset, notify mgmt and finally call the driver close callback.
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing more to do if the device was not up */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
70f23020
AE
2158 hdev = hci_dev_get(dev);
2159 if (!hdev)
1da177e4 2160 return -ENODEV;
8ee56540 2161
0736cfa8
MH
2162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
8ee56540
MH
2167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
1da177e4 2170 err = hci_dev_do_close(hdev);
8ee56540 2171
0736cfa8 2172done:
1da177e4
LT
2173 hci_dev_put(hdev);
2174 return err;
2175}
2176
2177int hci_dev_reset(__u16 dev)
2178{
2179 struct hci_dev *hdev;
2180 int ret = 0;
2181
70f23020
AE
2182 hdev = hci_dev_get(dev);
2183 if (!hdev)
1da177e4
LT
2184 return -ENODEV;
2185
2186 hci_req_lock(hdev);
1da177e4 2187
808a049e
MH
2188 if (!test_bit(HCI_UP, &hdev->flags)) {
2189 ret = -ENETDOWN;
1da177e4 2190 goto done;
808a049e 2191 }
1da177e4 2192
0736cfa8
MH
2193 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2194 ret = -EBUSY;
2195 goto done;
2196 }
2197
1da177e4
LT
2198 /* Drop queues */
2199 skb_queue_purge(&hdev->rx_q);
2200 skb_queue_purge(&hdev->cmd_q);
2201
09fd0de5 2202 hci_dev_lock(hdev);
1f9b9a5d 2203 hci_inquiry_cache_flush(hdev);
1da177e4 2204 hci_conn_hash_flush(hdev);
09fd0de5 2205 hci_dev_unlock(hdev);
1da177e4
LT
2206
2207 if (hdev->flush)
2208 hdev->flush(hdev);
2209
8e87d142 2210 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2211 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
2212
2213 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 2214 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2215
2216done:
1da177e4
LT
2217 hci_req_unlock(hdev);
2218 hci_dev_put(hdev);
2219 return ret;
2220}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
70f23020
AE
2227 hdev = hci_dev_get(dev);
2228 if (!hdev)
1da177e4
LT
2229 return -ENODEV;
2230
0736cfa8
MH
2231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
1da177e4
LT
2236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
0736cfa8 2238done:
1da177e4 2239 hci_dev_put(hdev);
1da177e4
LT
2240 return ret;
2241}
2242
2243int hci_dev_cmd(unsigned int cmd, void __user *arg)
2244{
2245 struct hci_dev *hdev;
2246 struct hci_dev_req dr;
2247 int err = 0;
2248
2249 if (copy_from_user(&dr, arg, sizeof(dr)))
2250 return -EFAULT;
2251
70f23020
AE
2252 hdev = hci_dev_get(dr.dev_id);
2253 if (!hdev)
1da177e4
LT
2254 return -ENODEV;
2255
0736cfa8
MH
2256 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 err = -EBUSY;
2258 goto done;
2259 }
2260
5b69bef5
MH
2261 if (hdev->dev_type != HCI_BREDR) {
2262 err = -EOPNOTSUPP;
2263 goto done;
2264 }
2265
56f87901
JH
2266 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2267 err = -EOPNOTSUPP;
2268 goto done;
2269 }
2270
1da177e4
LT
2271 switch (cmd) {
2272 case HCISETAUTH:
01178cd4
JH
2273 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2274 HCI_INIT_TIMEOUT);
1da177e4
LT
2275 break;
2276
2277 case HCISETENCRYPT:
2278 if (!lmp_encrypt_capable(hdev)) {
2279 err = -EOPNOTSUPP;
2280 break;
2281 }
2282
2283 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2284 /* Auth must be enabled first */
01178cd4
JH
2285 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286 HCI_INIT_TIMEOUT);
1da177e4
LT
2287 if (err)
2288 break;
2289 }
2290
01178cd4
JH
2291 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2292 HCI_INIT_TIMEOUT);
1da177e4
LT
2293 break;
2294
2295 case HCISETSCAN:
01178cd4
JH
2296 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2297 HCI_INIT_TIMEOUT);
1da177e4
LT
2298 break;
2299
1da177e4 2300 case HCISETLINKPOL:
01178cd4
JH
2301 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2302 HCI_INIT_TIMEOUT);
1da177e4
LT
2303 break;
2304
2305 case HCISETLINKMODE:
e4e8e37c
MH
2306 hdev->link_mode = ((__u16) dr.dev_opt) &
2307 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2308 break;
2309
2310 case HCISETPTYPE:
2311 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2312 break;
2313
2314 case HCISETACLMTU:
e4e8e37c
MH
2315 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2316 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2317 break;
2318
2319 case HCISETSCOMTU:
e4e8e37c
MH
2320 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2321 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2322 break;
2323
2324 default:
2325 err = -EINVAL;
2326 break;
2327 }
e4e8e37c 2328
0736cfa8 2329done:
1da177e4
LT
2330 hci_dev_put(hdev);
2331 return err;
2332}
2333
2334int hci_get_dev_list(void __user *arg)
2335{
8035ded4 2336 struct hci_dev *hdev;
1da177e4
LT
2337 struct hci_dev_list_req *dl;
2338 struct hci_dev_req *dr;
1da177e4
LT
2339 int n = 0, size, err;
2340 __u16 dev_num;
2341
2342 if (get_user(dev_num, (__u16 __user *) arg))
2343 return -EFAULT;
2344
2345 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2346 return -EINVAL;
2347
2348 size = sizeof(*dl) + dev_num * sizeof(*dr);
2349
70f23020
AE
2350 dl = kzalloc(size, GFP_KERNEL);
2351 if (!dl)
1da177e4
LT
2352 return -ENOMEM;
2353
2354 dr = dl->dev_req;
2355
f20d09d5 2356 read_lock(&hci_dev_list_lock);
8035ded4 2357 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2358 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2359 cancel_delayed_work(&hdev->power_off);
c542a06c 2360
a8b2d5c2
JH
2361 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2362 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2363
1da177e4
LT
2364 (dr + n)->dev_id = hdev->id;
2365 (dr + n)->dev_opt = hdev->flags;
c542a06c 2366
1da177e4
LT
2367 if (++n >= dev_num)
2368 break;
2369 }
f20d09d5 2370 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2371
2372 dl->dev_num = n;
2373 size = sizeof(*dl) + n * sizeof(*dr);
2374
2375 err = copy_to_user(arg, dl, size);
2376 kfree(dl);
2377
2378 return err ? -EFAULT : 0;
2379}
2380
2381int hci_get_dev_info(void __user *arg)
2382{
2383 struct hci_dev *hdev;
2384 struct hci_dev_info di;
2385 int err = 0;
2386
2387 if (copy_from_user(&di, arg, sizeof(di)))
2388 return -EFAULT;
2389
70f23020
AE
2390 hdev = hci_dev_get(di.dev_id);
2391 if (!hdev)
1da177e4
LT
2392 return -ENODEV;
2393
a8b2d5c2 2394 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2395 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2396
a8b2d5c2
JH
2397 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2398 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2399
1da177e4
LT
2400 strcpy(di.name, hdev->name);
2401 di.bdaddr = hdev->bdaddr;
60f2a3ed 2402 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2403 di.flags = hdev->flags;
2404 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2405 if (lmp_bredr_capable(hdev)) {
2406 di.acl_mtu = hdev->acl_mtu;
2407 di.acl_pkts = hdev->acl_pkts;
2408 di.sco_mtu = hdev->sco_mtu;
2409 di.sco_pkts = hdev->sco_pkts;
2410 } else {
2411 di.acl_mtu = hdev->le_mtu;
2412 di.acl_pkts = hdev->le_pkts;
2413 di.sco_mtu = 0;
2414 di.sco_pkts = 0;
2415 }
1da177e4
LT
2416 di.link_policy = hdev->link_policy;
2417 di.link_mode = hdev->link_mode;
2418
2419 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2420 memcpy(&di.features, &hdev->features, sizeof(di.features));
2421
2422 if (copy_to_user(arg, &di, sizeof(di)))
2423 err = -EFAULT;
2424
2425 hci_dev_put(hdev);
2426
2427 return err;
2428}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
611b30f7
MH
2432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
0736cfa8
MH
2438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
5e130367
JH
2441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
5e130367
JH
2445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2447 }
611b30f7
MH
2448
2449 return 0;
2450}
2451
2452static const struct rfkill_ops hci_rfkill_ops = {
2453 .set_block = hci_rfkill_set_block,
2454};
2455
ab81cbf9
JH
2456static void hci_power_on(struct work_struct *work)
2457{
2458 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2459 int err;
ab81cbf9
JH
2460
2461 BT_DBG("%s", hdev->name);
2462
cbed0ca1 2463 err = hci_dev_do_open(hdev);
96570ffc
JH
2464 if (err < 0) {
2465 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2466 return;
96570ffc 2467 }
ab81cbf9 2468
a5c8f270
MH
2469 /* During the HCI setup phase, a few error conditions are
2470 * ignored and they need to be checked now. If they are still
2471 * valid, it is important to turn the device back off.
2472 */
2473 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474 (hdev->dev_type == HCI_BREDR &&
2475 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2477 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478 hci_dev_do_close(hdev);
2479 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2480 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481 HCI_AUTO_OFF_TIMEOUT);
bf543036 2482 }
ab81cbf9 2483
a8b2d5c2 2484 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 2485 mgmt_index_added(hdev);
ab81cbf9
JH
2486}
2487
2488static void hci_power_off(struct work_struct *work)
2489{
3243553f 2490 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2491 power_off.work);
ab81cbf9
JH
2492
2493 BT_DBG("%s", hdev->name);
2494
8ee56540 2495 hci_dev_do_close(hdev);
ab81cbf9
JH
2496}
2497
16ab91ab
JH
2498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
16ab91ab
JH
2501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
d1967ff8 2506 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2507}
2508
35f7498a 2509void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2510{
4821002c 2511 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2512
4821002c
JH
2513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
2aeb9a1a
JH
2515 kfree(uuid);
2516 }
2aeb9a1a
JH
2517}
2518
35f7498a 2519void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
55ed8ca1
JH
2531}
2532
35f7498a 2533void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2534{
2535 struct smp_ltk *k, *tmp;
2536
2537 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2538 list_del(&k->list);
2539 kfree(k);
2540 }
b899efaf
VCG
2541}
2542
970c4e46
JH
2543void hci_smp_irks_clear(struct hci_dev *hdev)
2544{
2545 struct smp_irk *k, *tmp;
2546
2547 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551}
2552
55ed8ca1
JH
2553struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2554{
8035ded4 2555 struct link_key *k;
55ed8ca1 2556
8035ded4 2557 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2558 if (bacmp(bdaddr, &k->bdaddr) == 0)
2559 return k;
55ed8ca1
JH
2560
2561 return NULL;
2562}
2563
745c0ce3 2564static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2565 u8 key_type, u8 old_key_type)
d25e28ab
JH
2566{
2567 /* Legacy key */
2568 if (key_type < 0x03)
745c0ce3 2569 return true;
d25e28ab
JH
2570
2571 /* Debug keys are insecure so don't store them persistently */
2572 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2573 return false;
d25e28ab
JH
2574
2575 /* Changed combination key and there's no previous one */
2576 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2577 return false;
d25e28ab
JH
2578
2579 /* Security mode 3 case */
2580 if (!conn)
745c0ce3 2581 return true;
d25e28ab
JH
2582
2583 /* Neither local nor remote side had no-bonding as requirement */
2584 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2585 return true;
d25e28ab
JH
2586
2587 /* Local side had dedicated bonding as requirement */
2588 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2589 return true;
d25e28ab
JH
2590
2591 /* Remote side had dedicated bonding as requirement */
2592 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2593 return true;
d25e28ab
JH
2594
2595 /* If none of the above criteria match, then don't store the key
2596 * persistently */
745c0ce3 2597 return false;
d25e28ab
JH
2598}
2599
98a0b845
JH
2600static bool ltk_type_master(u8 type)
2601{
2602 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2603 return true;
2604
2605 return false;
2606}
2607
2608struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2609 bool master)
75d262c2 2610{
c9839a11 2611 struct smp_ltk *k;
75d262c2 2612
c9839a11
VCG
2613 list_for_each_entry(k, &hdev->long_term_keys, list) {
2614 if (k->ediv != ediv ||
a8c5fb1a 2615 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2616 continue;
2617
98a0b845
JH
2618 if (ltk_type_master(k->type) != master)
2619 continue;
2620
c9839a11 2621 return k;
75d262c2
VCG
2622 }
2623
2624 return NULL;
2625}
75d262c2 2626
c9839a11 2627struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2628 u8 addr_type, bool master)
75d262c2 2629{
c9839a11 2630 struct smp_ltk *k;
75d262c2 2631
c9839a11
VCG
2632 list_for_each_entry(k, &hdev->long_term_keys, list)
2633 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2634 bacmp(bdaddr, &k->bdaddr) == 0 &&
2635 ltk_type_master(k->type) == master)
75d262c2
VCG
2636 return k;
2637
2638 return NULL;
2639}
75d262c2 2640
970c4e46
JH
2641struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2642{
2643 struct smp_irk *irk;
2644
2645 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2646 if (!bacmp(&irk->rpa, rpa))
2647 return irk;
2648 }
2649
2650 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2651 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2652 bacpy(&irk->rpa, rpa);
2653 return irk;
2654 }
2655 }
2656
2657 return NULL;
2658}
2659
2660struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type)
2662{
2663 struct smp_irk *irk;
2664
2665 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2666 if (addr_type == irk->addr_type &&
2667 bacmp(bdaddr, &irk->bdaddr) == 0)
2668 return irk;
2669 }
2670
2671 return NULL;
2672}
2673
d25e28ab 2674int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2675 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2676{
2677 struct link_key *key, *old_key;
745c0ce3
VA
2678 u8 old_key_type;
2679 bool persistent;
55ed8ca1
JH
2680
2681 old_key = hci_find_link_key(hdev, bdaddr);
2682 if (old_key) {
2683 old_key_type = old_key->type;
2684 key = old_key;
2685 } else {
12adcf3a 2686 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2687 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2688 if (!key)
2689 return -ENOMEM;
2690 list_add(&key->list, &hdev->link_keys);
2691 }
2692
6ed93dc6 2693 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2694
d25e28ab
JH
2695 /* Some buggy controller combinations generate a changed
2696 * combination key for legacy pairing even when there's no
2697 * previous key */
2698 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2699 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2700 type = HCI_LK_COMBINATION;
655fe6ec
JH
2701 if (conn)
2702 conn->key_type = type;
2703 }
d25e28ab 2704
55ed8ca1 2705 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2706 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2707 key->pin_len = pin_len;
2708
b6020ba0 2709 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2710 key->type = old_key_type;
4748fed2
JH
2711 else
2712 key->type = type;
2713
4df378a1
JH
2714 if (!new_key)
2715 return 0;
2716
2717 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2718
744cf19e 2719 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2720
6ec5bcad
VA
2721 if (conn)
2722 conn->flush_key = !persistent;
55ed8ca1
JH
2723
2724 return 0;
2725}
2726
c9839a11 2727int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2728 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2729 ediv, u8 rand[8])
75d262c2 2730{
c9839a11 2731 struct smp_ltk *key, *old_key;
98a0b845 2732 bool master = ltk_type_master(type);
0fe442ff 2733 u8 persistent;
75d262c2 2734
98a0b845 2735 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2736 if (old_key)
75d262c2 2737 key = old_key;
c9839a11
VCG
2738 else {
2739 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2740 if (!key)
2741 return -ENOMEM;
c9839a11 2742 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2743 }
2744
75d262c2 2745 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2746 key->bdaddr_type = addr_type;
2747 memcpy(key->val, tk, sizeof(key->val));
2748 key->authenticated = authenticated;
2749 key->ediv = ediv;
2750 key->enc_size = enc_size;
2751 key->type = type;
2752 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2753
c9839a11
VCG
2754 if (!new_key)
2755 return 0;
75d262c2 2756
0fe442ff
MH
2757 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2758 persistent = 0;
2759 else
2760 persistent = 1;
2761
21b93b75 2762 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
0fe442ff 2763 mgmt_new_ltk(hdev, key, persistent);
261cc5aa 2764
75d262c2
VCG
2765 return 0;
2766}
2767
970c4e46
JH
2768int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2769 u8 val[16], bdaddr_t *rpa)
2770{
2771 struct smp_irk *irk;
2772
2773 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2774 if (!irk) {
2775 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2776 if (!irk)
2777 return -ENOMEM;
2778
2779 bacpy(&irk->bdaddr, bdaddr);
2780 irk->addr_type = addr_type;
2781
2782 list_add(&irk->list, &hdev->identity_resolving_keys);
2783 }
2784
2785 memcpy(irk->val, val, 16);
2786 bacpy(&irk->rpa, rpa);
2787
2788 return 0;
2789}
2790
55ed8ca1
JH
2791int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2792{
2793 struct link_key *key;
2794
2795 key = hci_find_link_key(hdev, bdaddr);
2796 if (!key)
2797 return -ENOENT;
2798
6ed93dc6 2799 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2800
2801 list_del(&key->list);
2802 kfree(key);
2803
2804 return 0;
2805}
2806
e0b2b27e 2807int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
2808{
2809 struct smp_ltk *k, *tmp;
c51ffa0b 2810 int removed = 0;
b899efaf
VCG
2811
2812 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 2813 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2814 continue;
2815
6ed93dc6 2816 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2817
2818 list_del(&k->list);
2819 kfree(k);
c51ffa0b 2820 removed++;
b899efaf
VCG
2821 }
2822
c51ffa0b 2823 return removed ? 0 : -ENOENT;
b899efaf
VCG
2824}
2825
6bd32326 2826/* HCI command timer function */
bda4f23a 2827static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2828{
2829 struct hci_dev *hdev = (void *) arg;
2830
bda4f23a
AE
2831 if (hdev->sent_cmd) {
2832 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2833 u16 opcode = __le16_to_cpu(sent->opcode);
2834
2835 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2836 } else {
2837 BT_ERR("%s command tx timeout", hdev->name);
2838 }
2839
6bd32326 2840 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2841 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2842}
2843
2763eda6 2844struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2845 bdaddr_t *bdaddr)
2763eda6
SJ
2846{
2847 struct oob_data *data;
2848
2849 list_for_each_entry(data, &hdev->remote_oob_data, list)
2850 if (bacmp(bdaddr, &data->bdaddr) == 0)
2851 return data;
2852
2853 return NULL;
2854}
2855
2856int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2857{
2858 struct oob_data *data;
2859
2860 data = hci_find_remote_oob_data(hdev, bdaddr);
2861 if (!data)
2862 return -ENOENT;
2863
6ed93dc6 2864 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2865
2866 list_del(&data->list);
2867 kfree(data);
2868
2869 return 0;
2870}
2871
35f7498a 2872void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2873{
2874 struct oob_data *data, *n;
2875
2876 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2877 list_del(&data->list);
2878 kfree(data);
2879 }
2763eda6
SJ
2880}
2881
0798872e
MH
2882int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2883 u8 *hash, u8 *randomizer)
2763eda6
SJ
2884{
2885 struct oob_data *data;
2886
2887 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 2888 if (!data) {
0798872e 2889 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2763eda6
SJ
2890 if (!data)
2891 return -ENOMEM;
2892
2893 bacpy(&data->bdaddr, bdaddr);
2894 list_add(&data->list, &hdev->remote_oob_data);
2895 }
2896
519ca9d0
MH
2897 memcpy(data->hash192, hash, sizeof(data->hash192));
2898 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 2899
0798872e
MH
2900 memset(data->hash256, 0, sizeof(data->hash256));
2901 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2902
2903 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2904
2905 return 0;
2906}
2907
2908int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2909 u8 *hash192, u8 *randomizer192,
2910 u8 *hash256, u8 *randomizer256)
2911{
2912 struct oob_data *data;
2913
2914 data = hci_find_remote_oob_data(hdev, bdaddr);
2915 if (!data) {
2916 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2917 if (!data)
2918 return -ENOMEM;
2919
2920 bacpy(&data->bdaddr, bdaddr);
2921 list_add(&data->list, &hdev->remote_oob_data);
2922 }
2923
2924 memcpy(data->hash192, hash192, sizeof(data->hash192));
2925 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2926
2927 memcpy(data->hash256, hash256, sizeof(data->hash256));
2928 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2929
6ed93dc6 2930 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2931
2932 return 0;
2933}
2934
b9ee0a78
MH
2935struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2936 bdaddr_t *bdaddr, u8 type)
b2a66aad 2937{
8035ded4 2938 struct bdaddr_list *b;
b2a66aad 2939
b9ee0a78
MH
2940 list_for_each_entry(b, &hdev->blacklist, list) {
2941 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2942 return b;
b9ee0a78 2943 }
b2a66aad
AJ
2944
2945 return NULL;
2946}
2947
35f7498a 2948void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
2949{
2950 struct list_head *p, *n;
2951
2952 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2953 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2954
2955 list_del(p);
2956 kfree(b);
2957 }
b2a66aad
AJ
2958}
2959
88c1fe4b 2960int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2961{
2962 struct bdaddr_list *entry;
b2a66aad 2963
b9ee0a78 2964 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2965 return -EBADF;
2966
b9ee0a78 2967 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2968 return -EEXIST;
b2a66aad
AJ
2969
2970 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2971 if (!entry)
2972 return -ENOMEM;
b2a66aad
AJ
2973
2974 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2975 entry->bdaddr_type = type;
b2a66aad
AJ
2976
2977 list_add(&entry->list, &hdev->blacklist);
2978
88c1fe4b 2979 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2980}
2981
88c1fe4b 2982int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2983{
2984 struct bdaddr_list *entry;
b2a66aad 2985
35f7498a
JH
2986 if (!bacmp(bdaddr, BDADDR_ANY)) {
2987 hci_blacklist_clear(hdev);
2988 return 0;
2989 }
b2a66aad 2990
b9ee0a78 2991 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2992 if (!entry)
5e762444 2993 return -ENOENT;
b2a66aad
AJ
2994
2995 list_del(&entry->list);
2996 kfree(entry);
2997
88c1fe4b 2998 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2999}
3000
15819a70
AG
3001/* This function requires the caller holds hdev->lock */
3002struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3003 bdaddr_t *addr, u8 addr_type)
3004{
3005 struct hci_conn_params *params;
3006
3007 list_for_each_entry(params, &hdev->le_conn_params, list) {
3008 if (bacmp(&params->addr, addr) == 0 &&
3009 params->addr_type == addr_type) {
3010 return params;
3011 }
3012 }
3013
3014 return NULL;
3015}
3016
3017/* This function requires the caller holds hdev->lock */
3018void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3019 u16 conn_min_interval, u16 conn_max_interval)
3020{
3021 struct hci_conn_params *params;
3022
3023 params = hci_conn_params_lookup(hdev, addr, addr_type);
3024 if (params) {
3025 params->conn_min_interval = conn_min_interval;
3026 params->conn_max_interval = conn_max_interval;
3027 return;
3028 }
3029
3030 params = kzalloc(sizeof(*params), GFP_KERNEL);
3031 if (!params) {
3032 BT_ERR("Out of memory");
3033 return;
3034 }
3035
3036 bacpy(&params->addr, addr);
3037 params->addr_type = addr_type;
3038 params->conn_min_interval = conn_min_interval;
3039 params->conn_max_interval = conn_max_interval;
3040
3041 list_add(&params->list, &hdev->le_conn_params);
3042
3043 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3044 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3045 conn_max_interval);
3046}
3047
3048/* This function requires the caller holds hdev->lock */
3049void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3050{
3051 struct hci_conn_params *params;
3052
3053 params = hci_conn_params_lookup(hdev, addr, addr_type);
3054 if (!params)
3055 return;
3056
3057 list_del(&params->list);
3058 kfree(params);
3059
3060 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3061}
3062
3063/* This function requires the caller holds hdev->lock */
3064void hci_conn_params_clear(struct hci_dev *hdev)
3065{
3066 struct hci_conn_params *params, *tmp;
3067
3068 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3069 list_del(&params->list);
3070 kfree(params);
3071 }
3072
3073 BT_DBG("All LE connection parameters were removed");
3074}
3075
4c87eaab 3076static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3077{
4c87eaab
AG
3078 if (status) {
3079 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3080
4c87eaab
AG
3081 hci_dev_lock(hdev);
3082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3083 hci_dev_unlock(hdev);
3084 return;
3085 }
7ba8b4be
AG
3086}
3087
4c87eaab 3088static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3089{
4c87eaab
AG
3090 /* General inquiry access code (GIAC) */
3091 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3092 struct hci_request req;
3093 struct hci_cp_inquiry cp;
7ba8b4be
AG
3094 int err;
3095
4c87eaab
AG
3096 if (status) {
3097 BT_ERR("Failed to disable LE scanning: status %d", status);
3098 return;
3099 }
7ba8b4be 3100
4c87eaab
AG
3101 switch (hdev->discovery.type) {
3102 case DISCOV_TYPE_LE:
3103 hci_dev_lock(hdev);
3104 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3105 hci_dev_unlock(hdev);
3106 break;
7ba8b4be 3107
4c87eaab
AG
3108 case DISCOV_TYPE_INTERLEAVED:
3109 hci_req_init(&req, hdev);
7ba8b4be 3110
4c87eaab
AG
3111 memset(&cp, 0, sizeof(cp));
3112 memcpy(&cp.lap, lap, sizeof(cp.lap));
3113 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3114 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3115
4c87eaab 3116 hci_dev_lock(hdev);
7dbfac1d 3117
4c87eaab 3118 hci_inquiry_cache_flush(hdev);
7dbfac1d 3119
4c87eaab
AG
3120 err = hci_req_run(&req, inquiry_complete);
3121 if (err) {
3122 BT_ERR("Inquiry request failed: err %d", err);
3123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3124 }
7dbfac1d 3125
4c87eaab
AG
3126 hci_dev_unlock(hdev);
3127 break;
7dbfac1d 3128 }
7dbfac1d
AG
3129}
3130
7ba8b4be
AG
3131static void le_scan_disable_work(struct work_struct *work)
3132{
3133 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3134 le_scan_disable.work);
7ba8b4be 3135 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
3136 struct hci_request req;
3137 int err;
7ba8b4be
AG
3138
3139 BT_DBG("%s", hdev->name);
3140
4c87eaab 3141 hci_req_init(&req, hdev);
28b75a89 3142
7ba8b4be 3143 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
3144 cp.enable = LE_SCAN_DISABLE;
3145 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 3146
4c87eaab
AG
3147 err = hci_req_run(&req, le_scan_disable_work_complete);
3148 if (err)
3149 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3150}
3151
9be0dab7
DH
3152/* Alloc HCI device */
3153struct hci_dev *hci_alloc_dev(void)
3154{
3155 struct hci_dev *hdev;
3156
3157 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3158 if (!hdev)
3159 return NULL;
3160
b1b813d4
DH
3161 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3162 hdev->esco_type = (ESCO_HV1);
3163 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3164 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3165 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3166 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3167 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3168
b1b813d4
DH
3169 hdev->sniff_max_interval = 800;
3170 hdev->sniff_min_interval = 80;
3171
bef64738
MH
3172 hdev->le_scan_interval = 0x0060;
3173 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3174 hdev->le_conn_min_interval = 0x0028;
3175 hdev->le_conn_max_interval = 0x0038;
bef64738 3176
b1b813d4
DH
3177 mutex_init(&hdev->lock);
3178 mutex_init(&hdev->req_lock);
3179
3180 INIT_LIST_HEAD(&hdev->mgmt_pending);
3181 INIT_LIST_HEAD(&hdev->blacklist);
3182 INIT_LIST_HEAD(&hdev->uuids);
3183 INIT_LIST_HEAD(&hdev->link_keys);
3184 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3185 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3186 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3187 INIT_LIST_HEAD(&hdev->le_conn_params);
6b536b5e 3188 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3189
3190 INIT_WORK(&hdev->rx_work, hci_rx_work);
3191 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3192 INIT_WORK(&hdev->tx_work, hci_tx_work);
3193 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3194
b1b813d4
DH
3195 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3196 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3197 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3198
b1b813d4
DH
3199 skb_queue_head_init(&hdev->rx_q);
3200 skb_queue_head_init(&hdev->cmd_q);
3201 skb_queue_head_init(&hdev->raw_q);
3202
3203 init_waitqueue_head(&hdev->req_wait_q);
3204
bda4f23a 3205 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3206
b1b813d4
DH
3207 hci_init_sysfs(hdev);
3208 discovery_init(hdev);
9be0dab7
DH
3209
3210 return hdev;
3211}
3212EXPORT_SYMBOL(hci_alloc_dev);
3213
3214/* Free HCI device */
3215void hci_free_dev(struct hci_dev *hdev)
3216{
9be0dab7
DH
3217 /* will free via device release */
3218 put_device(&hdev->dev);
3219}
3220EXPORT_SYMBOL(hci_free_dev);
3221
1da177e4
LT
3222/* Register HCI device */
3223int hci_register_dev(struct hci_dev *hdev)
3224{
b1b813d4 3225 int id, error;
1da177e4 3226
010666a1 3227 if (!hdev->open || !hdev->close)
1da177e4
LT
3228 return -EINVAL;
3229
08add513
MM
3230 /* Do not allow HCI_AMP devices to register at index 0,
3231 * so the index can be used as the AMP controller ID.
3232 */
3df92b31
SL
3233 switch (hdev->dev_type) {
3234 case HCI_BREDR:
3235 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3236 break;
3237 case HCI_AMP:
3238 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3239 break;
3240 default:
3241 return -EINVAL;
1da177e4 3242 }
8e87d142 3243
3df92b31
SL
3244 if (id < 0)
3245 return id;
3246
1da177e4
LT
3247 sprintf(hdev->name, "hci%d", id);
3248 hdev->id = id;
2d8b3a11
AE
3249
3250 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3251
d8537548
KC
3252 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3253 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3254 if (!hdev->workqueue) {
3255 error = -ENOMEM;
3256 goto err;
3257 }
f48fd9c8 3258
d8537548
KC
3259 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3260 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3261 if (!hdev->req_workqueue) {
3262 destroy_workqueue(hdev->workqueue);
3263 error = -ENOMEM;
3264 goto err;
3265 }
3266
0153e2ec
MH
3267 if (!IS_ERR_OR_NULL(bt_debugfs))
3268 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3269
bdc3e0f1
MH
3270 dev_set_name(&hdev->dev, "%s", hdev->name);
3271
99780a7b
JH
3272 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3273 CRYPTO_ALG_ASYNC);
3274 if (IS_ERR(hdev->tfm_aes)) {
3275 BT_ERR("Unable to create crypto context");
3276 error = PTR_ERR(hdev->tfm_aes);
3277 hdev->tfm_aes = NULL;
3278 goto err_wqueue;
3279 }
3280
bdc3e0f1 3281 error = device_add(&hdev->dev);
33ca954d 3282 if (error < 0)
99780a7b 3283 goto err_tfm;
1da177e4 3284
611b30f7 3285 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3286 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3287 hdev);
611b30f7
MH
3288 if (hdev->rfkill) {
3289 if (rfkill_register(hdev->rfkill) < 0) {
3290 rfkill_destroy(hdev->rfkill);
3291 hdev->rfkill = NULL;
3292 }
3293 }
3294
5e130367
JH
3295 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3296 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3297
a8b2d5c2 3298 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3299 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3300
01cd3404 3301 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3302 /* Assume BR/EDR support until proven otherwise (such as
3303 * through reading supported features during init.
3304 */
3305 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3306 }
ce2be9ac 3307
fcee3377
GP
3308 write_lock(&hci_dev_list_lock);
3309 list_add(&hdev->list, &hci_dev_list);
3310 write_unlock(&hci_dev_list_lock);
3311
1da177e4 3312 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3313 hci_dev_hold(hdev);
1da177e4 3314
19202573 3315 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3316
1da177e4 3317 return id;
f48fd9c8 3318
99780a7b
JH
3319err_tfm:
3320 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3321err_wqueue:
3322 destroy_workqueue(hdev->workqueue);
6ead1bbc 3323 destroy_workqueue(hdev->req_workqueue);
33ca954d 3324err:
3df92b31 3325 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3326
33ca954d 3327 return error;
1da177e4
LT
3328}
3329EXPORT_SYMBOL(hci_register_dev);
3330
/* Unregister HCI device.
 *
 * Tears down everything set up by hci_register_dev() in reverse order:
 * unlink from the global list, close the device, notify mgmt, release
 * rfkill/crypto/sysfs/debugfs resources and clear all stored keys.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the id: hci_dev_put() below may free hdev */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	/* tfm_aes may be NULL if registration failed part-way */
	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all stored security material and parameters */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3396
3397/* Suspend HCI device */
3398int hci_suspend_dev(struct hci_dev *hdev)
3399{
3400 hci_notify(hdev, HCI_DEV_SUSPEND);
3401 return 0;
3402}
3403EXPORT_SYMBOL(hci_suspend_dev);
3404
3405/* Resume HCI device */
3406int hci_resume_dev(struct hci_dev *hdev)
3407{
3408 hci_notify(hdev, HCI_DEV_RESUME);
3409 return 0;
3410}
3411EXPORT_SYMBOL(hci_resume_dev);
3412
76bca880 3413/* Receive frame from HCI drivers */
e1a26170 3414int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3415{
76bca880 3416 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3417 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3418 kfree_skb(skb);
3419 return -ENXIO;
3420 }
3421
d82603c6 3422 /* Incoming skb */
76bca880
MH
3423 bt_cb(skb)->incoming = 1;
3424
3425 /* Time stamp */
3426 __net_timestamp(skb);
3427
76bca880 3428 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3429 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3430
76bca880
MH
3431 return 0;
3432}
3433EXPORT_SYMBOL(hci_recv_frame);
3434
/* Reassemble a full HCI packet from driver-provided byte chunks.
 *
 * @type:  packet type (ACL, SCO or event)
 * @data:  next chunk of bytes
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[]
 *
 * Returns the number of unconsumed bytes (>= 0) or a negative errno.
 * A completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First chunk: allocate a max-sized skb for this type and
		 * expect the packet header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length from
		 * it and verify it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3542
ef222013
MH
3543int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3544{
f39a3c06
SS
3545 int rem = 0;
3546
ef222013
MH
3547 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3548 return -EILSEQ;
3549
da5f6c37 3550 while (count) {
1e429f38 3551 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3552 if (rem < 0)
3553 return rem;
ef222013 3554
f39a3c06
SS
3555 data += (count - rem);
3556 count = rem;
f81c6224 3557 }
ef222013 3558
f39a3c06 3559 return rem;
ef222013
MH
3560}
3561EXPORT_SYMBOL(hci_recv_fragment);
3562
#define STREAM_REASSEMBLY 0

/* Feed a byte stream (e.g. from a UART driver) into reassembly.  The
 * first byte of every frame carries the packet type.  Returns bytes
 * left unconsumed (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3597
1da177e4
LT
3598/* ---- Interface to upper protocols ---- */
3599
1da177e4
LT
3600int hci_register_cb(struct hci_cb *cb)
3601{
3602 BT_DBG("%p name %s", cb, cb->name);
3603
f20d09d5 3604 write_lock(&hci_cb_list_lock);
1da177e4 3605 list_add(&cb->list, &hci_cb_list);
f20d09d5 3606 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3607
3608 return 0;
3609}
3610EXPORT_SYMBOL(hci_register_cb);
3611
3612int hci_unregister_cb(struct hci_cb *cb)
3613{
3614 BT_DBG("%p name %s", cb, cb->name);
3615
f20d09d5 3616 write_lock(&hci_cb_list_lock);
1da177e4 3617 list_del(&cb->list);
f20d09d5 3618 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3619
3620 return 0;
3621}
3622EXPORT_SYMBOL(hci_unregister_cb);
3623
51086991 3624static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3625{
0d48d939 3626 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3627
cd82e61c
MH
3628 /* Time stamp */
3629 __net_timestamp(skb);
1da177e4 3630
cd82e61c
MH
3631 /* Send copy to monitor */
3632 hci_send_to_monitor(hdev, skb);
3633
3634 if (atomic_read(&hdev->promisc)) {
3635 /* Send copy to the sockets */
470fe1b5 3636 hci_send_to_sock(hdev, skb);
1da177e4
LT
3637 }
3638
3639 /* Get rid of skb owner, prior to sending to the driver. */
3640 skb_orphan(skb);
3641
7bd8f09f 3642 if (hdev->send(hdev, skb) < 0)
51086991 3643 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
3644}
3645
3119ae95
JH
3646void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3647{
3648 skb_queue_head_init(&req->cmd_q);
3649 req->hdev = hdev;
5d73e034 3650 req->err = 0;
3119ae95
JH
3651}
3652
/* Submit a built request to the controller.
 *
 * The @complete callback is attached to the last queued command and all
 * commands are spliced onto the device command queue atomically.
 * Returns 0 on success, the builder's error, or -ENODATA if empty.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3684
1ca3a9d0 3685static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3686 u32 plen, const void *param)
1da177e4
LT
3687{
3688 int len = HCI_COMMAND_HDR_SIZE + plen;
3689 struct hci_command_hdr *hdr;
3690 struct sk_buff *skb;
3691
1da177e4 3692 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3693 if (!skb)
3694 return NULL;
1da177e4
LT
3695
3696 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3697 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3698 hdr->plen = plen;
3699
3700 if (plen)
3701 memcpy(skb_put(skb, plen), param, plen);
3702
3703 BT_DBG("skb len %d", skb->len);
3704
0d48d939 3705 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3706
1ca3a9d0
JH
3707 return skb;
3708}
3709
3710/* Send HCI command */
07dc93dd
JH
3711int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3712 const void *param)
1ca3a9d0
JH
3713{
3714 struct sk_buff *skb;
3715
3716 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3717
3718 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3719 if (!skb) {
3720 BT_ERR("%s no memory for command", hdev->name);
3721 return -ENOMEM;
3722 }
3723
11714b3d
JH
3724 /* Stand-alone HCI commands must be flaged as
3725 * single-command requests.
3726 */
3727 bt_cb(skb)->req.start = true;
3728
1da177e4 3729 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3730 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3731
3732 return 0;
3733}
1da177e4 3734
/* Queue a command to an asynchronous HCI request.
 *
 * @event: non-zero to wait for a specific event instead of the normal
 *         command complete/status.
 *
 * On allocation failure req->err is set so hci_req_run() can abort.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3765
07dc93dd
JH
3766void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3767 const void *param)
02350a72
JH
3768{
3769 hci_req_add_ev(req, opcode, plen, param, 0);
3770}
3771
1da177e4 3772/* Get data from the previously sent command */
a9de9248 3773void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3774{
3775 struct hci_command_hdr *hdr;
3776
3777 if (!hdev->sent_cmd)
3778 return NULL;
3779
3780 hdr = (void *) hdev->sent_cmd->data;
3781
a9de9248 3782 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3783 return NULL;
3784
f0e09510 3785 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3786
3787 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3788}
3789
3790/* Send ACL data */
3791static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3792{
3793 struct hci_acl_hdr *hdr;
3794 int len = skb->len;
3795
badff6d0
ACM
3796 skb_push(skb, HCI_ACL_HDR_SIZE);
3797 skb_reset_transport_header(skb);
9c70220b 3798 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3799 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3800 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3801}
3802
/* Add ACL headers and queue a (possibly fragmented) skb on @queue.
 *
 * For AMP controllers the channel handle is used instead of the
 * connection handle.  Fragments in the frag_list are queued atomically
 * with ACL_CONT set on every fragment after the first.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3860
3861void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3862{
ee22be7e 3863 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3864
f0e09510 3865 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3866
ee22be7e 3867 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3868
3eff45ea 3869 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3870}
1da177e4
LT
3871
3872/* Send SCO data */
0d861d8b 3873void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3874{
3875 struct hci_dev *hdev = conn->hdev;
3876 struct hci_sco_hdr hdr;
3877
3878 BT_DBG("%s len %d", hdev->name, skb->len);
3879
aca3192c 3880 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3881 hdr.dlen = skb->len;
3882
badff6d0
ACM
3883 skb_push(skb, HCI_SCO_HDR_SIZE);
3884 skb_reset_transport_header(skb);
9c70220b 3885 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3886
0d48d939 3887 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3888
1da177e4 3889 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3890 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3891}
1da177e4
LT
3892
3893/* ---- HCI TX task (outgoing data) ---- */
3894
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest outstanding frames and
 * compute its fair share (*quote) of the free controller buffers.
 * Returns NULL (quote 0) when no connection has queued data.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins (fairness) */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pool of free buffers depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3955
6039aa73 3956static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3957{
3958 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3959 struct hci_conn *c;
1da177e4 3960
bae1f5d9 3961 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3962
bf4c6325
GP
3963 rcu_read_lock();
3964
1da177e4 3965 /* Kill stalled connections */
bf4c6325 3966 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3967 if (c->type == type && c->sent) {
6ed93dc6
AE
3968 BT_ERR("%s killing stalled connection %pMR",
3969 hdev->name, &c->dst);
bed71748 3970 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3971 }
3972 }
bf4c6325
GP
3973
3974 rcu_read_unlock();
1da177e4
LT
3975}
3976
/* Channel-level scheduler: among all channels of connections of @type,
 * pick the channel whose head skb has the highest priority, breaking
 * ties by the fewest frames sent on its connection.  *quote receives
 * the fair share of free controller buffers.  Returns NULL when
 * nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority restarts the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pool of free buffers depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4058
/* Anti-starvation pass: after a scheduling round, bump the priority of
 * the head skb on every channel that did not get to send anything, so
 * it is preferred in the next round.  Channels that did send have their
 * per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4108
b71d385a
AE
4109static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4110{
4111 /* Calculate count of blocks used by this packet */
4112 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4113}
4114
6039aa73 4115static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4116{
1da177e4
LT
4117 if (!test_bit(HCI_RAW, &hdev->flags)) {
4118 /* ACL tx timeout must be longer than maximum
4119 * link supervision timeout (40.9 seconds) */
63d2bc1b 4120 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4121 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4122 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4123 }
63d2bc1b 4124}
1da177e4 4125
/* Packet-based ACL scheduler: drain channel queues while controller
 * buffers (hdev->acl_cnt) remain, respecting per-channel quotas and
 * stopping a channel when its head priority drops.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities for fairness */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4163
/* Block-based ACL scheduler (data block flow control): like the
 * packet-based variant but accounting in controller buffer blocks
 * (hdev->block_cnt).  AMP controllers schedule AMP_LINK connections.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up if the packet needs more blocks than
			 * the controller has left.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4217
6039aa73 4218static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4219{
4220 BT_DBG("%s", hdev->name);
4221
bd1eb66b
AE
4222 /* No ACL link over BR/EDR controller */
4223 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4224 return;
4225
4226 /* No AMP link over AMP controller */
4227 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4228 return;
4229
4230 switch (hdev->flow_ctl_mode) {
4231 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4232 hci_sched_acl_pkt(hdev);
4233 break;
4234
4235 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4236 hci_sched_acl_blk(hdev);
4237 break;
4238 }
4239}
4240
1da177e4 4241/* Schedule SCO */
6039aa73 4242static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4243{
4244 struct hci_conn *conn;
4245 struct sk_buff *skb;
4246 int quote;
4247
4248 BT_DBG("%s", hdev->name);
4249
52087a79
LAD
4250 if (!hci_conn_num(hdev, SCO_LINK))
4251 return;
4252
1da177e4
LT
4253 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4254 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4255 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4256 hci_send_frame(hdev, skb);
1da177e4
LT
4257
4258 conn->sent++;
4259 if (conn->sent == ~0)
4260 conn->sent = 0;
4261 }
4262 }
4263}
4264
/* Schedule eSCO links; shares the SCO buffer pool (hdev->sco_cnt) */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrapping per-connection sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4288
/* LE scheduler: drains LE channel queues.  Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) borrow from the ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4339
3eff45ea 4340static void hci_tx_work(struct work_struct *work)
1da177e4 4341{
3eff45ea 4342 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4343 struct sk_buff *skb;
4344
6ed58ec5 4345 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4346 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4347
52de599e
MH
4348 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4349 /* Schedule queues and send stuff to HCI driver */
4350 hci_sched_acl(hdev);
4351 hci_sched_sco(hdev);
4352 hci_sched_esco(hdev);
4353 hci_sched_le(hdev);
4354 }
6ed58ec5 4355
1da177e4
LT
4356 /* Send next queued raw (unknown type) packet */
4357 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4358 hci_send_frame(hdev, skb);
1da177e4
LT
4359}
4360
25985edc 4361/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4362
4363/* ACL data packet */
6039aa73 4364static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4365{
4366 struct hci_acl_hdr *hdr = (void *) skb->data;
4367 struct hci_conn *conn;
4368 __u16 handle, flags;
4369
4370 skb_pull(skb, HCI_ACL_HDR_SIZE);
4371
4372 handle = __le16_to_cpu(hdr->handle);
4373 flags = hci_flags(handle);
4374 handle = hci_handle(handle);
4375
f0e09510 4376 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4377 handle, flags);
1da177e4
LT
4378
4379 hdev->stat.acl_rx++;
4380
4381 hci_dev_lock(hdev);
4382 conn = hci_conn_hash_lookup_handle(hdev, handle);
4383 hci_dev_unlock(hdev);
8e87d142 4384
1da177e4 4385 if (conn) {
65983fc7 4386 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4387
1da177e4 4388 /* Send to upper protocol */
686ebf28
UF
4389 l2cap_recv_acldata(conn, skb, flags);
4390 return;
1da177e4 4391 } else {
8e87d142 4392 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4393 hdev->name, handle);
1da177e4
LT
4394 }
4395
4396 kfree_skb(skb);
4397}
4398
4399/* SCO data packet */
6039aa73 4400static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4401{
4402 struct hci_sco_hdr *hdr = (void *) skb->data;
4403 struct hci_conn *conn;
4404 __u16 handle;
4405
4406 skb_pull(skb, HCI_SCO_HDR_SIZE);
4407
4408 handle = __le16_to_cpu(hdr->handle);
4409
f0e09510 4410 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4411
4412 hdev->stat.sco_rx++;
4413
4414 hci_dev_lock(hdev);
4415 conn = hci_conn_hash_lookup_handle(hdev, handle);
4416 hci_dev_unlock(hdev);
4417
4418 if (conn) {
1da177e4 4419 /* Send to upper protocol */
686ebf28
UF
4420 sco_recv_scodata(conn, skb);
4421 return;
1da177e4 4422 } else {
8e87d142 4423 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4424 hdev->name, handle);
1da177e4
LT
4425 }
4426
4427 kfree_skb(skb);
4428}
4429
9238f36a
JH
4430static bool hci_req_is_complete(struct hci_dev *hdev)
4431{
4432 struct sk_buff *skb;
4433
4434 skb = skb_peek(&hdev->cmd_q);
4435 if (!skb)
4436 return true;
4437
4438 return bt_cb(skb)->req.start;
4439}
4440
42c6b129
JH
4441static void hci_resend_last(struct hci_dev *hdev)
4442{
4443 struct hci_command_hdr *sent;
4444 struct sk_buff *skb;
4445 u16 opcode;
4446
4447 if (!hdev->sent_cmd)
4448 return;
4449
4450 sent = (void *) hdev->sent_cmd->data;
4451 opcode = __le16_to_cpu(sent->opcode);
4452 if (opcode == HCI_OP_RESET)
4453 return;
4454
4455 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4456 if (!skb)
4457 return;
4458
4459 skb_queue_head(&hdev->cmd_q, skb);
4460 queue_work(hdev->workqueue, &hdev->cmd_work);
4461}
4462
9238f36a
JH
/* Called from the event handlers when a Command Complete/Status event
 * for @opcode arrives with @status.  Decides whether the request the
 * command belonged to is now finished and, if so, runs the request's
 * completion callback exactly once.  On failure, all remaining queued
 * commands of the same request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	/* Drop queued commands up to (but not including) the skb that
	 * starts the next request; the callback, if any, is taken from
	 * one of the dropped entries.  The queue lock protects against
	 * concurrent producers.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4528
/* RX work callback: drain hdev->rx_q and dispatch each packet.
 *
 * Every received frame is first copied to the monitor channel and,
 * when a promiscuous listener exists, to the raw HCI sockets.  Frames
 * are then dropped (not processed by the stack) while the device is in
 * raw or user-channel mode, and data packets are dropped while
 * HCI_INIT is set, since only events matter during controller setup.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode userspace owns the
		 * traffic; the stack must not consume it.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing to dispatch to */
			kfree_skb(skb);
			break;
		}
	}
}
4584
/* CMD work callback: send the next queued HCI command when the
 * controller has a free command credit (cmd_cnt > 0).
 *
 * A clone of the outgoing command is kept in hdev->sent_cmd so the
 * event handlers can match the completion against it (and so it can
 * be resent, see hci_resend_last).  The command timer is armed to
 * catch controllers that never answer; for HCI_RESET the timer is
 * deleted instead, since resets may legitimately take long.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Free the previously saved command before replacing it
		 * (kfree_skb(NULL) is a no-op on the first command).
		 */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back at the head
			 * of the queue and retry from the workqueue.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.160809 seconds and 5 git commands to generate.