Bluetooth: Store local version information only during setup phase
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
47219839 32#include <asm/unaligned.h>
1da177e4
LT
33
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
b78752cc 37static void hci_rx_work(struct work_struct *work);
c347b765 38static void hci_cmd_work(struct work_struct *work);
3eff45ea 39static void hci_tx_work(struct work_struct *work);
1da177e4 40
1da177e4
LT
41/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
3df92b31
SL
49/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
1da177e4
LT
52/* ---- HCI notifications ---- */
53
/* Propagate an HCI device state-change event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
baf27f6e
MH
59/* ---- HCI debugfs entries ---- */
60
dfb826a8
MH
61static int features_show(struct seq_file *f, void *ptr)
62{
63 struct hci_dev *hdev = f->private;
64 u8 p;
65
66 hci_dev_lock(hdev);
67 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
68 seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
69 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
70 hdev->features[p][0], hdev->features[p][1],
71 hdev->features[p][2], hdev->features[p][3],
72 hdev->features[p][4], hdev->features[p][5],
73 hdev->features[p][6], hdev->features[p][7]);
74 }
75 hci_dev_unlock(hdev);
76
77 return 0;
78}
79
80static int features_open(struct inode *inode, struct file *file)
81{
82 return single_open(file, features_show, inode->i_private);
83}
84
85static const struct file_operations features_fops = {
86 .open = features_open,
87 .read = seq_read,
88 .llseek = seq_lseek,
89 .release = single_release,
90};
91
70afe0b8
MH
92static int blacklist_show(struct seq_file *f, void *p)
93{
94 struct hci_dev *hdev = f->private;
95 struct bdaddr_list *b;
96
97 hci_dev_lock(hdev);
98 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 99 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
100 hci_dev_unlock(hdev);
101
102 return 0;
103}
104
105static int blacklist_open(struct inode *inode, struct file *file)
106{
107 return single_open(file, blacklist_show, inode->i_private);
108}
109
110static const struct file_operations blacklist_fops = {
111 .open = blacklist_open,
112 .read = seq_read,
113 .llseek = seq_lseek,
114 .release = single_release,
115};
116
47219839
MH
117static int uuids_show(struct seq_file *f, void *p)
118{
119 struct hci_dev *hdev = f->private;
120 struct bt_uuid *uuid;
121
122 hci_dev_lock(hdev);
123 list_for_each_entry(uuid, &hdev->uuids, list) {
124 u32 data0, data5;
125 u16 data1, data2, data3, data4;
126
127 data5 = get_unaligned_le32(uuid);
128 data4 = get_unaligned_le16(uuid + 4);
129 data3 = get_unaligned_le16(uuid + 6);
130 data2 = get_unaligned_le16(uuid + 8);
131 data1 = get_unaligned_le16(uuid + 10);
132 data0 = get_unaligned_le32(uuid + 12);
133
134 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
135 data0, data1, data2, data3, data4, data5);
136 }
137 hci_dev_unlock(hdev);
138
139 return 0;
140}
141
142static int uuids_open(struct inode *inode, struct file *file)
143{
144 return single_open(file, uuids_show, inode->i_private);
145}
146
147static const struct file_operations uuids_fops = {
148 .open = uuids_open,
149 .read = seq_read,
150 .llseek = seq_lseek,
151 .release = single_release,
152};
153
baf27f6e
MH
154static int inquiry_cache_show(struct seq_file *f, void *p)
155{
156 struct hci_dev *hdev = f->private;
157 struct discovery_state *cache = &hdev->discovery;
158 struct inquiry_entry *e;
159
160 hci_dev_lock(hdev);
161
162 list_for_each_entry(e, &cache->all, all) {
163 struct inquiry_data *data = &e->data;
164 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
165 &data->bdaddr,
166 data->pscan_rep_mode, data->pscan_period_mode,
167 data->pscan_mode, data->dev_class[2],
168 data->dev_class[1], data->dev_class[0],
169 __le16_to_cpu(data->clock_offset),
170 data->rssi, data->ssp_mode, e->timestamp);
171 }
172
173 hci_dev_unlock(hdev);
174
175 return 0;
176}
177
178static int inquiry_cache_open(struct inode *inode, struct file *file)
179{
180 return single_open(file, inquiry_cache_show, inode->i_private);
181}
182
183static const struct file_operations inquiry_cache_fops = {
184 .open = inquiry_cache_open,
185 .read = seq_read,
186 .llseek = seq_lseek,
187 .release = single_release,
188};
189
041000b9
MH
190static int voice_setting_get(void *data, u64 *val)
191{
192 struct hci_dev *hdev = data;
193
194 hci_dev_lock(hdev);
195 *val = hdev->voice_setting;
196 hci_dev_unlock(hdev);
197
198 return 0;
199}
200
201DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
202 NULL, "0x%4.4llx\n");
203
ebd1e33b
MH
204static int auto_accept_delay_set(void *data, u64 val)
205{
206 struct hci_dev *hdev = data;
207
208 hci_dev_lock(hdev);
209 hdev->auto_accept_delay = val;
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int auto_accept_delay_get(void *data, u64 *val)
216{
217 struct hci_dev *hdev = data;
218
219 hci_dev_lock(hdev);
220 *val = hdev->auto_accept_delay;
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
227 auto_accept_delay_set, "%llu\n");
228
2bfa3531
MH
229static int idle_timeout_set(void *data, u64 val)
230{
231 struct hci_dev *hdev = data;
232
233 if (val != 0 && (val < 500 || val > 3600000))
234 return -EINVAL;
235
236 hci_dev_lock(hdev);
237 hdev->idle_timeout= val;
238 hci_dev_unlock(hdev);
239
240 return 0;
241}
242
243static int idle_timeout_get(void *data, u64 *val)
244{
245 struct hci_dev *hdev = data;
246
247 hci_dev_lock(hdev);
248 *val = hdev->idle_timeout;
249 hci_dev_unlock(hdev);
250
251 return 0;
252}
253
254DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
255 idle_timeout_set, "%llu\n");
256
257static int sniff_min_interval_set(void *data, u64 val)
258{
259 struct hci_dev *hdev = data;
260
261 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
262 return -EINVAL;
263
264 hci_dev_lock(hdev);
265 hdev->sniff_min_interval= val;
266 hci_dev_unlock(hdev);
267
268 return 0;
269}
270
271static int sniff_min_interval_get(void *data, u64 *val)
272{
273 struct hci_dev *hdev = data;
274
275 hci_dev_lock(hdev);
276 *val = hdev->sniff_min_interval;
277 hci_dev_unlock(hdev);
278
279 return 0;
280}
281
282DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
283 sniff_min_interval_set, "%llu\n");
284
285static int sniff_max_interval_set(void *data, u64 val)
286{
287 struct hci_dev *hdev = data;
288
289 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
290 return -EINVAL;
291
292 hci_dev_lock(hdev);
293 hdev->sniff_max_interval= val;
294 hci_dev_unlock(hdev);
295
296 return 0;
297}
298
299static int sniff_max_interval_get(void *data, u64 *val)
300{
301 struct hci_dev *hdev = data;
302
303 hci_dev_lock(hdev);
304 *val = hdev->sniff_max_interval;
305 hci_dev_unlock(hdev);
306
307 return 0;
308}
309
310DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
311 sniff_max_interval_set, "%llu\n");
312
e7b8fc92
MH
313static int static_address_show(struct seq_file *f, void *p)
314{
315 struct hci_dev *hdev = f->private;
316
317 hci_dev_lock(hdev);
318 seq_printf(f, "%pMR\n", &hdev->static_addr);
319 hci_dev_unlock(hdev);
320
321 return 0;
322}
323
324static int static_address_open(struct inode *inode, struct file *file)
325{
326 return single_open(file, static_address_show, inode->i_private);
327}
328
329static const struct file_operations static_address_fops = {
330 .open = static_address_open,
331 .read = seq_read,
332 .llseek = seq_lseek,
333 .release = single_release,
334};
335
1da177e4
LT
336/* ---- HCI requests ---- */
337
42c6b129 338static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 339{
42c6b129 340 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
341
342 if (hdev->req_status == HCI_REQ_PEND) {
343 hdev->req_result = result;
344 hdev->req_status = HCI_REQ_DONE;
345 wake_up_interruptible(&hdev->req_wait_q);
346 }
347}
348
349static void hci_req_cancel(struct hci_dev *hdev, int err)
350{
351 BT_DBG("%s err 0x%2.2x", hdev->name, err);
352
353 if (hdev->req_status == HCI_REQ_PEND) {
354 hdev->req_result = err;
355 hdev->req_status = HCI_REQ_CANCELED;
356 wake_up_interruptible(&hdev->req_wait_q);
357 }
358}
359
77a63e0a
FW
360static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
361 u8 event)
75e84b7c
JH
362{
363 struct hci_ev_cmd_complete *ev;
364 struct hci_event_hdr *hdr;
365 struct sk_buff *skb;
366
367 hci_dev_lock(hdev);
368
369 skb = hdev->recv_evt;
370 hdev->recv_evt = NULL;
371
372 hci_dev_unlock(hdev);
373
374 if (!skb)
375 return ERR_PTR(-ENODATA);
376
377 if (skb->len < sizeof(*hdr)) {
378 BT_ERR("Too short HCI event");
379 goto failed;
380 }
381
382 hdr = (void *) skb->data;
383 skb_pull(skb, HCI_EVENT_HDR_SIZE);
384
7b1abbbe
JH
385 if (event) {
386 if (hdr->evt != event)
387 goto failed;
388 return skb;
389 }
390
75e84b7c
JH
391 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
392 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
393 goto failed;
394 }
395
396 if (skb->len < sizeof(*ev)) {
397 BT_ERR("Too short cmd_complete event");
398 goto failed;
399 }
400
401 ev = (void *) skb->data;
402 skb_pull(skb, sizeof(*ev));
403
404 if (opcode == __le16_to_cpu(ev->opcode))
405 return skb;
406
407 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
408 __le16_to_cpu(ev->opcode));
409
410failed:
411 kfree_skb(skb);
412 return ERR_PTR(-ENODATA);
413}
414
7b1abbbe 415struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 416 const void *param, u8 event, u32 timeout)
75e84b7c
JH
417{
418 DECLARE_WAITQUEUE(wait, current);
419 struct hci_request req;
420 int err = 0;
421
422 BT_DBG("%s", hdev->name);
423
424 hci_req_init(&req, hdev);
425
7b1abbbe 426 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
427
428 hdev->req_status = HCI_REQ_PEND;
429
430 err = hci_req_run(&req, hci_req_sync_complete);
431 if (err < 0)
432 return ERR_PTR(err);
433
434 add_wait_queue(&hdev->req_wait_q, &wait);
435 set_current_state(TASK_INTERRUPTIBLE);
436
437 schedule_timeout(timeout);
438
439 remove_wait_queue(&hdev->req_wait_q, &wait);
440
441 if (signal_pending(current))
442 return ERR_PTR(-EINTR);
443
444 switch (hdev->req_status) {
445 case HCI_REQ_DONE:
446 err = -bt_to_errno(hdev->req_result);
447 break;
448
449 case HCI_REQ_CANCELED:
450 err = -hdev->req_result;
451 break;
452
453 default:
454 err = -ETIMEDOUT;
455 break;
456 }
457
458 hdev->req_status = hdev->req_result = 0;
459
460 BT_DBG("%s end: err %d", hdev->name, err);
461
462 if (err < 0)
463 return ERR_PTR(err);
464
7b1abbbe
JH
465 return hci_get_cmd_complete(hdev, opcode, event);
466}
467EXPORT_SYMBOL(__hci_cmd_sync_ev);
468
469struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 470 const void *param, u32 timeout)
7b1abbbe
JH
471{
472 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
473}
474EXPORT_SYMBOL(__hci_cmd_sync);
475
1da177e4 476/* Execute request and wait for completion. */
01178cd4 477static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
478 void (*func)(struct hci_request *req,
479 unsigned long opt),
01178cd4 480 unsigned long opt, __u32 timeout)
1da177e4 481{
42c6b129 482 struct hci_request req;
1da177e4
LT
483 DECLARE_WAITQUEUE(wait, current);
484 int err = 0;
485
486 BT_DBG("%s start", hdev->name);
487
42c6b129
JH
488 hci_req_init(&req, hdev);
489
1da177e4
LT
490 hdev->req_status = HCI_REQ_PEND;
491
42c6b129 492 func(&req, opt);
53cce22d 493
42c6b129
JH
494 err = hci_req_run(&req, hci_req_sync_complete);
495 if (err < 0) {
53cce22d 496 hdev->req_status = 0;
920c8300
AG
497
498 /* ENODATA means the HCI request command queue is empty.
499 * This can happen when a request with conditionals doesn't
500 * trigger any commands to be sent. This is normal behavior
501 * and should not trigger an error return.
42c6b129 502 */
920c8300
AG
503 if (err == -ENODATA)
504 return 0;
505
506 return err;
53cce22d
JH
507 }
508
bc4445c7
AG
509 add_wait_queue(&hdev->req_wait_q, &wait);
510 set_current_state(TASK_INTERRUPTIBLE);
511
1da177e4
LT
512 schedule_timeout(timeout);
513
514 remove_wait_queue(&hdev->req_wait_q, &wait);
515
516 if (signal_pending(current))
517 return -EINTR;
518
519 switch (hdev->req_status) {
520 case HCI_REQ_DONE:
e175072f 521 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
522 break;
523
524 case HCI_REQ_CANCELED:
525 err = -hdev->req_result;
526 break;
527
528 default:
529 err = -ETIMEDOUT;
530 break;
3ff50b79 531 }
1da177e4 532
a5040efa 533 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
534
535 BT_DBG("%s end: err %d", hdev->name, err);
536
537 return err;
538}
539
01178cd4 540static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
541 void (*req)(struct hci_request *req,
542 unsigned long opt),
01178cd4 543 unsigned long opt, __u32 timeout)
1da177e4
LT
544{
545 int ret;
546
7c6a329e
MH
547 if (!test_bit(HCI_UP, &hdev->flags))
548 return -ENETDOWN;
549
1da177e4
LT
550 /* Serialize all requests */
551 hci_req_lock(hdev);
01178cd4 552 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
553 hci_req_unlock(hdev);
554
555 return ret;
556}
557
42c6b129 558static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 559{
42c6b129 560 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
561
562 /* Reset device */
42c6b129
JH
563 set_bit(HCI_RESET, &req->hdev->flags);
564 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
565}
566
42c6b129 567static void bredr_init(struct hci_request *req)
1da177e4 568{
42c6b129 569 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 570
1da177e4 571 /* Read Local Supported Features */
42c6b129 572 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 573
1143e5a6 574 /* Read Local Version */
42c6b129 575 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
576
577 /* Read BD Address */
42c6b129 578 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
579}
580
42c6b129 581static void amp_init(struct hci_request *req)
e61ef499 582{
42c6b129 583 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 584
e61ef499 585 /* Read Local Version */
42c6b129 586 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 587
f6996cfe
MH
588 /* Read Local Supported Commands */
589 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
590
591 /* Read Local Supported Features */
592 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
593
6bcbc489 594 /* Read Local AMP Info */
42c6b129 595 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
596
597 /* Read Data Blk size */
42c6b129 598 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 599
f38ba941
MH
600 /* Read Flow Control Mode */
601 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
602
7528ca1c
MH
603 /* Read Location Data */
604 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
605}
606
42c6b129 607static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 608{
42c6b129 609 struct hci_dev *hdev = req->hdev;
e61ef499
AE
610
611 BT_DBG("%s %ld", hdev->name, opt);
612
11778716
AE
613 /* Reset */
614 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 615 hci_reset_req(req, 0);
11778716 616
e61ef499
AE
617 switch (hdev->dev_type) {
618 case HCI_BREDR:
42c6b129 619 bredr_init(req);
e61ef499
AE
620 break;
621
622 case HCI_AMP:
42c6b129 623 amp_init(req);
e61ef499
AE
624 break;
625
626 default:
627 BT_ERR("Unknown device type %d", hdev->dev_type);
628 break;
629 }
e61ef499
AE
630}
631
42c6b129 632static void bredr_setup(struct hci_request *req)
2177bab5 633{
4ca048e3
MH
634 struct hci_dev *hdev = req->hdev;
635
2177bab5
JH
636 __le16 param;
637 __u8 flt_type;
638
639 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 640 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
641
642 /* Read Class of Device */
42c6b129 643 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
644
645 /* Read Local Name */
42c6b129 646 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
647
648 /* Read Voice Setting */
42c6b129 649 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 650
b4cb9fb2
MH
651 /* Read Number of Supported IAC */
652 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
653
4b836f39
MH
654 /* Read Current IAC LAP */
655 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
656
2177bab5
JH
657 /* Clear Event Filters */
658 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 659 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
660
661 /* Connection accept timeout ~20 secs */
662 param = __constant_cpu_to_le16(0x7d00);
42c6b129 663 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 664
4ca048e3
MH
665 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
666 * but it does not support page scan related HCI commands.
667 */
668 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
669 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
670 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
671 }
2177bab5
JH
672}
673
42c6b129 674static void le_setup(struct hci_request *req)
2177bab5 675{
c73eee91
JH
676 struct hci_dev *hdev = req->hdev;
677
2177bab5 678 /* Read LE Buffer Size */
42c6b129 679 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
680
681 /* Read LE Local Supported Features */
42c6b129 682 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
683
684 /* Read LE Advertising Channel TX Power */
42c6b129 685 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
686
687 /* Read LE White List Size */
42c6b129 688 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
689
690 /* Read LE Supported States */
42c6b129 691 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
c73eee91
JH
692
693 /* LE-only controllers have LE implicitly enabled */
694 if (!lmp_bredr_capable(hdev))
695 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
696}
697
698static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
699{
700 if (lmp_ext_inq_capable(hdev))
701 return 0x02;
702
703 if (lmp_inq_rssi_capable(hdev))
704 return 0x01;
705
706 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
707 hdev->lmp_subver == 0x0757)
708 return 0x01;
709
710 if (hdev->manufacturer == 15) {
711 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
712 return 0x01;
713 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
714 return 0x01;
715 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
716 return 0x01;
717 }
718
719 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
720 hdev->lmp_subver == 0x1805)
721 return 0x01;
722
723 return 0x00;
724}
725
42c6b129 726static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
727{
728 u8 mode;
729
42c6b129 730 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 731
42c6b129 732 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
733}
734
42c6b129 735static void hci_setup_event_mask(struct hci_request *req)
2177bab5 736{
42c6b129
JH
737 struct hci_dev *hdev = req->hdev;
738
2177bab5
JH
739 /* The second byte is 0xff instead of 0x9f (two reserved bits
740 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
741 * command otherwise.
742 */
743 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
744
745 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
746 * any event mask for pre 1.2 devices.
747 */
748 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
749 return;
750
751 if (lmp_bredr_capable(hdev)) {
752 events[4] |= 0x01; /* Flow Specification Complete */
753 events[4] |= 0x02; /* Inquiry Result with RSSI */
754 events[4] |= 0x04; /* Read Remote Extended Features Complete */
755 events[5] |= 0x08; /* Synchronous Connection Complete */
756 events[5] |= 0x10; /* Synchronous Connection Changed */
c7882cbd
MH
757 } else {
758 /* Use a different default for LE-only devices */
759 memset(events, 0, sizeof(events));
760 events[0] |= 0x10; /* Disconnection Complete */
761 events[0] |= 0x80; /* Encryption Change */
762 events[1] |= 0x08; /* Read Remote Version Information Complete */
763 events[1] |= 0x20; /* Command Complete */
764 events[1] |= 0x40; /* Command Status */
765 events[1] |= 0x80; /* Hardware Error */
766 events[2] |= 0x04; /* Number of Completed Packets */
767 events[3] |= 0x02; /* Data Buffer Overflow */
768 events[5] |= 0x80; /* Encryption Key Refresh Complete */
2177bab5
JH
769 }
770
771 if (lmp_inq_rssi_capable(hdev))
772 events[4] |= 0x02; /* Inquiry Result with RSSI */
773
774 if (lmp_sniffsubr_capable(hdev))
775 events[5] |= 0x20; /* Sniff Subrating */
776
777 if (lmp_pause_enc_capable(hdev))
778 events[5] |= 0x80; /* Encryption Key Refresh Complete */
779
780 if (lmp_ext_inq_capable(hdev))
781 events[5] |= 0x40; /* Extended Inquiry Result */
782
783 if (lmp_no_flush_capable(hdev))
784 events[7] |= 0x01; /* Enhanced Flush Complete */
785
786 if (lmp_lsto_capable(hdev))
787 events[6] |= 0x80; /* Link Supervision Timeout Changed */
788
789 if (lmp_ssp_capable(hdev)) {
790 events[6] |= 0x01; /* IO Capability Request */
791 events[6] |= 0x02; /* IO Capability Response */
792 events[6] |= 0x04; /* User Confirmation Request */
793 events[6] |= 0x08; /* User Passkey Request */
794 events[6] |= 0x10; /* Remote OOB Data Request */
795 events[6] |= 0x20; /* Simple Pairing Complete */
796 events[7] |= 0x04; /* User Passkey Notification */
797 events[7] |= 0x08; /* Keypress Notification */
798 events[7] |= 0x10; /* Remote Host Supported
799 * Features Notification
800 */
801 }
802
803 if (lmp_le_capable(hdev))
804 events[7] |= 0x20; /* LE Meta-Event */
805
42c6b129 806 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
807
808 if (lmp_le_capable(hdev)) {
809 memset(events, 0, sizeof(events));
810 events[0] = 0x1f;
42c6b129
JH
811 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
812 sizeof(events), events);
2177bab5
JH
813 }
814}
815
42c6b129 816static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 817{
42c6b129
JH
818 struct hci_dev *hdev = req->hdev;
819
2177bab5 820 if (lmp_bredr_capable(hdev))
42c6b129 821 bredr_setup(req);
56f87901
JH
822 else
823 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
824
825 if (lmp_le_capable(hdev))
42c6b129 826 le_setup(req);
2177bab5 827
42c6b129 828 hci_setup_event_mask(req);
2177bab5 829
3f8e2d75
JH
830 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
831 * local supported commands HCI command.
832 */
833 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 834 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
835
836 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
837 /* When SSP is available, then the host features page
838 * should also be available as well. However some
839 * controllers list the max_page as 0 as long as SSP
840 * has not been enabled. To achieve proper debugging
841 * output, force the minimum max_page to 1 at least.
842 */
843 hdev->max_page = 0x01;
844
2177bab5
JH
845 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
846 u8 mode = 0x01;
42c6b129
JH
847 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
848 sizeof(mode), &mode);
2177bab5
JH
849 } else {
850 struct hci_cp_write_eir cp;
851
852 memset(hdev->eir, 0, sizeof(hdev->eir));
853 memset(&cp, 0, sizeof(cp));
854
42c6b129 855 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
856 }
857 }
858
859 if (lmp_inq_rssi_capable(hdev))
42c6b129 860 hci_setup_inquiry_mode(req);
2177bab5
JH
861
862 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 863 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
864
865 if (lmp_ext_feat_capable(hdev)) {
866 struct hci_cp_read_local_ext_features cp;
867
868 cp.page = 0x01;
42c6b129
JH
869 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
870 sizeof(cp), &cp);
2177bab5
JH
871 }
872
873 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
874 u8 enable = 1;
42c6b129
JH
875 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
876 &enable);
2177bab5
JH
877 }
878}
879
42c6b129 880static void hci_setup_link_policy(struct hci_request *req)
2177bab5 881{
42c6b129 882 struct hci_dev *hdev = req->hdev;
2177bab5
JH
883 struct hci_cp_write_def_link_policy cp;
884 u16 link_policy = 0;
885
886 if (lmp_rswitch_capable(hdev))
887 link_policy |= HCI_LP_RSWITCH;
888 if (lmp_hold_capable(hdev))
889 link_policy |= HCI_LP_HOLD;
890 if (lmp_sniff_capable(hdev))
891 link_policy |= HCI_LP_SNIFF;
892 if (lmp_park_capable(hdev))
893 link_policy |= HCI_LP_PARK;
894
895 cp.policy = cpu_to_le16(link_policy);
42c6b129 896 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
897}
898
42c6b129 899static void hci_set_le_support(struct hci_request *req)
2177bab5 900{
42c6b129 901 struct hci_dev *hdev = req->hdev;
2177bab5
JH
902 struct hci_cp_write_le_host_supported cp;
903
c73eee91
JH
904 /* LE-only devices do not support explicit enablement */
905 if (!lmp_bredr_capable(hdev))
906 return;
907
2177bab5
JH
908 memset(&cp, 0, sizeof(cp));
909
910 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
911 cp.le = 0x01;
912 cp.simul = lmp_le_br_capable(hdev);
913 }
914
915 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
916 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
917 &cp);
2177bab5
JH
918}
919
d62e6d67
JH
920static void hci_set_event_mask_page_2(struct hci_request *req)
921{
922 struct hci_dev *hdev = req->hdev;
923 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
924
925 /* If Connectionless Slave Broadcast master role is supported
926 * enable all necessary events for it.
927 */
928 if (hdev->features[2][0] & 0x01) {
929 events[1] |= 0x40; /* Triggered Clock Capture */
930 events[1] |= 0x80; /* Synchronization Train Complete */
931 events[2] |= 0x10; /* Slave Page Response Timeout */
932 events[2] |= 0x20; /* CSB Channel Map Change */
933 }
934
935 /* If Connectionless Slave Broadcast slave role is supported
936 * enable all necessary events for it.
937 */
938 if (hdev->features[2][0] & 0x02) {
939 events[2] |= 0x01; /* Synchronization Train Received */
940 events[2] |= 0x02; /* CSB Receive */
941 events[2] |= 0x04; /* CSB Timeout */
942 events[2] |= 0x08; /* Truncated Page Complete */
943 }
944
945 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
946}
947
42c6b129 948static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 949{
42c6b129 950 struct hci_dev *hdev = req->hdev;
d2c5d77f 951 u8 p;
42c6b129 952
b8f4e068
GP
953 /* Some Broadcom based Bluetooth controllers do not support the
954 * Delete Stored Link Key command. They are clearly indicating its
955 * absence in the bit mask of supported commands.
956 *
957 * Check the supported commands and only if the the command is marked
958 * as supported send it. If not supported assume that the controller
959 * does not have actual support for stored link keys which makes this
960 * command redundant anyway.
637b4cae 961 */
59f45d57
JH
962 if (hdev->commands[6] & 0x80) {
963 struct hci_cp_delete_stored_link_key cp;
964
965 bacpy(&cp.bdaddr, BDADDR_ANY);
966 cp.delete_all = 0x01;
967 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
968 sizeof(cp), &cp);
969 }
970
2177bab5 971 if (hdev->commands[5] & 0x10)
42c6b129 972 hci_setup_link_policy(req);
2177bab5 973
441ad2d0 974 if (lmp_le_capable(hdev))
42c6b129 975 hci_set_le_support(req);
d2c5d77f
JH
976
977 /* Read features beyond page 1 if available */
978 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
979 struct hci_cp_read_local_ext_features cp;
980
981 cp.page = p;
982 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
983 sizeof(cp), &cp);
984 }
2177bab5
JH
985}
986
5d4e7e8d
JH
987static void hci_init4_req(struct hci_request *req, unsigned long opt)
988{
989 struct hci_dev *hdev = req->hdev;
990
d62e6d67
JH
991 /* Set event mask page 2 if the HCI command for it is supported */
992 if (hdev->commands[22] & 0x04)
993 hci_set_event_mask_page_2(req);
994
5d4e7e8d
JH
995 /* Check for Synchronization Train support */
996 if (hdev->features[2][0] & 0x04)
997 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
998}
999
2177bab5
JH
1000static int __hci_init(struct hci_dev *hdev)
1001{
1002 int err;
1003
1004 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1005 if (err < 0)
1006 return err;
1007
1008 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1009 * BR/EDR/LE type controllers. AMP controllers only need the
1010 * first stage init.
1011 */
1012 if (hdev->dev_type != HCI_BREDR)
1013 return 0;
1014
1015 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1016 if (err < 0)
1017 return err;
1018
5d4e7e8d
JH
1019 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1020 if (err < 0)
1021 return err;
1022
baf27f6e
MH
1023 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1024 if (err < 0)
1025 return err;
1026
1027 /* Only create debugfs entries during the initial setup
1028 * phase and not every time the controller gets powered on.
1029 */
1030 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1031 return 0;
1032
dfb826a8
MH
1033 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1034 &features_fops);
ceeb3bc0
MH
1035 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1036 &hdev->manufacturer);
1037 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1038 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1039 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1040 &blacklist_fops);
47219839
MH
1041 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1042
baf27f6e
MH
1043 if (lmp_bredr_capable(hdev)) {
1044 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1045 hdev, &inquiry_cache_fops);
041000b9
MH
1046 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1047 hdev, &voice_setting_fops);
baf27f6e
MH
1048 }
1049
ebd1e33b
MH
1050 if (lmp_ssp_capable(hdev))
1051 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1052 hdev, &auto_accept_delay_fops);
1053
2bfa3531
MH
1054 if (lmp_sniff_capable(hdev)) {
1055 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1056 hdev, &idle_timeout_fops);
1057 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1058 hdev, &sniff_min_interval_fops);
1059 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1060 hdev, &sniff_max_interval_fops);
1061 }
1062
e7b8fc92
MH
1063 if (lmp_le_capable(hdev))
1064 debugfs_create_file("static_address", 0444, hdev->debugfs,
1065 hdev, &static_address_fops);
1066
baf27f6e 1067 return 0;
2177bab5
JH
1068}
1069
42c6b129 1070static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1071{
1072 __u8 scan = opt;
1073
42c6b129 1074 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1075
1076 /* Inquiry and Page scans */
42c6b129 1077 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1078}
1079
42c6b129 1080static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1081{
1082 __u8 auth = opt;
1083
42c6b129 1084 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1085
1086 /* Authentication */
42c6b129 1087 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1088}
1089
42c6b129 1090static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1091{
1092 __u8 encrypt = opt;
1093
42c6b129 1094 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1095
e4e8e37c 1096 /* Encryption */
42c6b129 1097 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1098}
1099
42c6b129 1100static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1101{
1102 __le16 policy = cpu_to_le16(opt);
1103
42c6b129 1104 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1105
1106 /* Default link policy */
42c6b129 1107 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1108}
1109
8e87d142 1110/* Get HCI device by index.
1da177e4
LT
1111 * Device is held on return. */
1112struct hci_dev *hci_dev_get(int index)
1113{
8035ded4 1114 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1115
1116 BT_DBG("%d", index);
1117
1118 if (index < 0)
1119 return NULL;
1120
1121 read_lock(&hci_dev_list_lock);
8035ded4 1122 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1123 if (d->id == index) {
1124 hdev = hci_dev_hold(d);
1125 break;
1126 }
1127 }
1128 read_unlock(&hci_dev_list_lock);
1129 return hdev;
1130}
1da177e4
LT
1131
1132/* ---- Inquiry support ---- */
ff9ef578 1133
30dc78e1
JH
1134bool hci_discovery_active(struct hci_dev *hdev)
1135{
1136 struct discovery_state *discov = &hdev->discovery;
1137
6fbe195d 1138 switch (discov->state) {
343f935b 1139 case DISCOVERY_FINDING:
6fbe195d 1140 case DISCOVERY_RESOLVING:
30dc78e1
JH
1141 return true;
1142
6fbe195d
AG
1143 default:
1144 return false;
1145 }
30dc78e1
JH
1146}
1147
ff9ef578
JH
1148void hci_discovery_set_state(struct hci_dev *hdev, int state)
1149{
1150 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1151
1152 if (hdev->discovery.state == state)
1153 return;
1154
1155 switch (state) {
1156 case DISCOVERY_STOPPED:
7b99b659
AG
1157 if (hdev->discovery.state != DISCOVERY_STARTING)
1158 mgmt_discovering(hdev, 0);
ff9ef578
JH
1159 break;
1160 case DISCOVERY_STARTING:
1161 break;
343f935b 1162 case DISCOVERY_FINDING:
ff9ef578
JH
1163 mgmt_discovering(hdev, 1);
1164 break;
30dc78e1
JH
1165 case DISCOVERY_RESOLVING:
1166 break;
ff9ef578
JH
1167 case DISCOVERY_STOPPING:
1168 break;
1169 }
1170
1171 hdev->discovery.state = state;
1172}
1173
1f9b9a5d 1174void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1175{
30883512 1176 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1177 struct inquiry_entry *p, *n;
1da177e4 1178
561aafbc
JH
1179 list_for_each_entry_safe(p, n, &cache->all, all) {
1180 list_del(&p->all);
b57c1a56 1181 kfree(p);
1da177e4 1182 }
561aafbc
JH
1183
1184 INIT_LIST_HEAD(&cache->unknown);
1185 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1186}
1187
a8c5fb1a
GP
1188struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1189 bdaddr_t *bdaddr)
1da177e4 1190{
30883512 1191 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1192 struct inquiry_entry *e;
1193
6ed93dc6 1194 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1195
561aafbc
JH
1196 list_for_each_entry(e, &cache->all, all) {
1197 if (!bacmp(&e->data.bdaddr, bdaddr))
1198 return e;
1199 }
1200
1201 return NULL;
1202}
1203
1204struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1205 bdaddr_t *bdaddr)
561aafbc 1206{
30883512 1207 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1208 struct inquiry_entry *e;
1209
6ed93dc6 1210 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1211
1212 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1213 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1214 return e;
1215 }
1216
1217 return NULL;
1da177e4
LT
1218}
1219
30dc78e1 1220struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1221 bdaddr_t *bdaddr,
1222 int state)
30dc78e1
JH
1223{
1224 struct discovery_state *cache = &hdev->discovery;
1225 struct inquiry_entry *e;
1226
6ed93dc6 1227 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1228
1229 list_for_each_entry(e, &cache->resolve, list) {
1230 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1231 return e;
1232 if (!bacmp(&e->data.bdaddr, bdaddr))
1233 return e;
1234 }
1235
1236 return NULL;
1237}
1238
/* Re-insert @ie into the resolve list so it stays ordered by signal
 * strength: entries with smaller abs(rssi) (i.e. stronger signal)
 * come first, and entries whose name request is already in flight
 * (NAME_PENDING) keep their position at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove from its current position before picking the new slot */
	list_del(&ie->list);

	/* Walk until the first non-pending entry with a weaker-or-equal
	 * signal; @ie is inserted just before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1257
/* Add a fresh inquiry result to the discovery cache, or refresh the
 * existing entry for the same address.
 *
 * When @ssp is non-NULL it is updated with whether the remote device
 * indicated Secure Simple Pairing support (a device once seen as SSP
 * capable stays SSP capable). Returns true when the remote name is
 * already known (no name request is needed), false otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A new inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once known to be SSP capable, always report SSP */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered if the RSSI changed while
		 * a name request is still outstanding for this entry.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* The name just became known: drop the entry from the
	 * unknown/resolve bookkeeping (unless a request is pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1315
1316static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1317{
30883512 1318 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1319 struct inquiry_info *info = (struct inquiry_info *) buf;
1320 struct inquiry_entry *e;
1321 int copied = 0;
1322
561aafbc 1323 list_for_each_entry(e, &cache->all, all) {
1da177e4 1324 struct inquiry_data *data = &e->data;
b57c1a56
JH
1325
1326 if (copied >= num)
1327 break;
1328
1da177e4
LT
1329 bacpy(&info->bdaddr, &data->bdaddr);
1330 info->pscan_rep_mode = data->pscan_rep_mode;
1331 info->pscan_period_mode = data->pscan_period_mode;
1332 info->pscan_mode = data->pscan_mode;
1333 memcpy(info->dev_class, data->dev_class, 3);
1334 info->clock_offset = data->clock_offset;
b57c1a56 1335
1da177e4 1336 info++;
b57c1a56 1337 copied++;
1da177e4
LT
1338 }
1339
1340 BT_DBG("cache %p, copied %d", cache, copied);
1341 return copied;
1342}
1343
42c6b129 1344static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1345{
1346 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1347 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1348 struct hci_cp_inquiry cp;
1349
1350 BT_DBG("%s", hdev->name);
1351
1352 if (test_bit(HCI_INQUIRY, &hdev->flags))
1353 return;
1354
1355 /* Start Inquiry */
1356 memcpy(&cp.lap, &ir->lap, 3);
1357 cp.length = ir->length;
1358 cp.num_rsp = ir->num_rsp;
42c6b129 1359 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1360}
1361
3e13fa1e
AG
/* Bit-wait action for wait_on_bit(): sleep once and report whether the
 * wait should be aborted because a signal is pending.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1367
1da177e4
LT
1368int hci_inquiry(void __user *arg)
1369{
1370 __u8 __user *ptr = arg;
1371 struct hci_inquiry_req ir;
1372 struct hci_dev *hdev;
1373 int err = 0, do_inquiry = 0, max_rsp;
1374 long timeo;
1375 __u8 *buf;
1376
1377 if (copy_from_user(&ir, ptr, sizeof(ir)))
1378 return -EFAULT;
1379
5a08ecce
AE
1380 hdev = hci_dev_get(ir.dev_id);
1381 if (!hdev)
1da177e4
LT
1382 return -ENODEV;
1383
0736cfa8
MH
1384 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1385 err = -EBUSY;
1386 goto done;
1387 }
1388
5b69bef5
MH
1389 if (hdev->dev_type != HCI_BREDR) {
1390 err = -EOPNOTSUPP;
1391 goto done;
1392 }
1393
56f87901
JH
1394 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1395 err = -EOPNOTSUPP;
1396 goto done;
1397 }
1398
09fd0de5 1399 hci_dev_lock(hdev);
8e87d142 1400 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1401 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1402 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1403 do_inquiry = 1;
1404 }
09fd0de5 1405 hci_dev_unlock(hdev);
1da177e4 1406
04837f64 1407 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1408
1409 if (do_inquiry) {
01178cd4
JH
1410 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1411 timeo);
70f23020
AE
1412 if (err < 0)
1413 goto done;
3e13fa1e
AG
1414
1415 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1416 * cleared). If it is interrupted by a signal, return -EINTR.
1417 */
1418 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1419 TASK_INTERRUPTIBLE))
1420 return -EINTR;
70f23020 1421 }
1da177e4 1422
8fc9ced3
GP
1423 /* for unlimited number of responses we will use buffer with
1424 * 255 entries
1425 */
1da177e4
LT
1426 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1427
1428 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1429 * copy it to the user space.
1430 */
01df8c31 1431 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1432 if (!buf) {
1da177e4
LT
1433 err = -ENOMEM;
1434 goto done;
1435 }
1436
09fd0de5 1437 hci_dev_lock(hdev);
1da177e4 1438 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1439 hci_dev_unlock(hdev);
1da177e4
LT
1440
1441 BT_DBG("num_rsp %d", ir.num_rsp);
1442
1443 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1444 ptr += sizeof(ir);
1445 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1446 ir.num_rsp))
1da177e4 1447 err = -EFAULT;
8e87d142 1448 } else
1da177e4
LT
1449 err = -EFAULT;
1450
1451 kfree(buf);
1452
1453done:
1454 hci_dev_put(hdev);
1455 return err;
1456}
1457
/* Bring the controller up: run driver open, the optional driver setup
 * hook (first power-on only) and the HCI init sequence. On any init
 * failure the transport is fully torn down again. Returns 0 or a
 * negative errno. Serialized via the request lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only once, during HCI_SETUP */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* mgmt "powered" only for regular BR/EDR controllers
		 * that are past setup and not claimed by user space.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1559
cbed0ca1
JH
1560/* ---- HCI ioctl helpers ---- */
1561
1562int hci_dev_open(__u16 dev)
1563{
1564 struct hci_dev *hdev;
1565 int err;
1566
1567 hdev = hci_dev_get(dev);
1568 if (!hdev)
1569 return -ENODEV;
1570
e1d08f40
JH
1571 /* We need to ensure that no other power on/off work is pending
1572 * before proceeding to call hci_dev_do_open. This is
1573 * particularly important if the setup procedure has not yet
1574 * completed.
1575 */
1576 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1577 cancel_delayed_work(&hdev->power_off);
1578
a5c8f270
MH
1579 /* After this call it is guaranteed that the setup procedure
1580 * has finished. This means that error conditions like RFKILL
1581 * or no valid public or static random address apply.
1582 */
e1d08f40
JH
1583 flush_workqueue(hdev->req_workqueue);
1584
cbed0ca1
JH
1585 err = hci_dev_do_open(hdev);
1586
1587 hci_dev_put(hdev);
1588
1589 return err;
1590}
1591
1da177e4
LT
/* Bring the controller down: cancel pending work, flush queues and
 * connections, optionally send HCI Reset, close the driver transport
 * and clear all non-persistent state. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Discoverable mode does not survive a power cycle */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only signal "powered off" to mgmt when this was not an
	 * automatic power-off of a BR/EDR controller.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_do_open() */
	hci_dev_put(hdev);
	return 0;
}
1689
1690int hci_dev_close(__u16 dev)
1691{
1692 struct hci_dev *hdev;
1693 int err;
1694
70f23020
AE
1695 hdev = hci_dev_get(dev);
1696 if (!hdev)
1da177e4 1697 return -ENODEV;
8ee56540 1698
0736cfa8
MH
1699 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1700 err = -EBUSY;
1701 goto done;
1702 }
1703
8ee56540
MH
1704 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1705 cancel_delayed_work(&hdev->power_off);
1706
1da177e4 1707 err = hci_dev_do_close(hdev);
8ee56540 1708
0736cfa8 1709done:
1da177e4
LT
1710 hci_dev_put(hdev);
1711 return err;
1712}
1713
1714int hci_dev_reset(__u16 dev)
1715{
1716 struct hci_dev *hdev;
1717 int ret = 0;
1718
70f23020
AE
1719 hdev = hci_dev_get(dev);
1720 if (!hdev)
1da177e4
LT
1721 return -ENODEV;
1722
1723 hci_req_lock(hdev);
1da177e4 1724
808a049e
MH
1725 if (!test_bit(HCI_UP, &hdev->flags)) {
1726 ret = -ENETDOWN;
1da177e4 1727 goto done;
808a049e 1728 }
1da177e4 1729
0736cfa8
MH
1730 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1731 ret = -EBUSY;
1732 goto done;
1733 }
1734
1da177e4
LT
1735 /* Drop queues */
1736 skb_queue_purge(&hdev->rx_q);
1737 skb_queue_purge(&hdev->cmd_q);
1738
09fd0de5 1739 hci_dev_lock(hdev);
1f9b9a5d 1740 hci_inquiry_cache_flush(hdev);
1da177e4 1741 hci_conn_hash_flush(hdev);
09fd0de5 1742 hci_dev_unlock(hdev);
1da177e4
LT
1743
1744 if (hdev->flush)
1745 hdev->flush(hdev);
1746
8e87d142 1747 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1748 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1749
1750 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1751 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1752
1753done:
1da177e4
LT
1754 hci_req_unlock(hdev);
1755 hci_dev_put(hdev);
1756 return ret;
1757}
1758
1759int hci_dev_reset_stat(__u16 dev)
1760{
1761 struct hci_dev *hdev;
1762 int ret = 0;
1763
70f23020
AE
1764 hdev = hci_dev_get(dev);
1765 if (!hdev)
1da177e4
LT
1766 return -ENODEV;
1767
0736cfa8
MH
1768 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1769 ret = -EBUSY;
1770 goto done;
1771 }
1772
1da177e4
LT
1773 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1774
0736cfa8 1775done:
1da177e4 1776 hci_dev_put(hdev);
1da177e4
LT
1777 return ret;
1778}
1779
/* Handle the HCISET* device configuration ioctls. Only valid for
 * BR/EDR capable controllers that are not claimed by user space.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs MTU in the upper and packet count in the
	 * lower 16 bits for the two MTU ioctls below.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1870
/* HCIGETDEVLIST ioctl: copy the id/flags of up to dev_num registered
 * controllers to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation driven by the user-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access keeps auto-off devices powered */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Non-mgmt users get the legacy pairable default */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1917
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one controller
 * and copy it to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access keeps auto-off devices powered */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Non-mgmt users get the legacy pairable default */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* type byte: low nibble bus, next two bits controller type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields since struct hci_dev_info has no LE fields.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1966
1967/* ---- Interface to HCI drivers ---- */
1968
611b30f7
MH
1969static int hci_rfkill_set_block(void *data, bool blocked)
1970{
1971 struct hci_dev *hdev = data;
1972
1973 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1974
0736cfa8
MH
1975 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1976 return -EBUSY;
1977
5e130367
JH
1978 if (blocked) {
1979 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
1980 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1981 hci_dev_do_close(hdev);
5e130367
JH
1982 } else {
1983 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 1984 }
611b30f7
MH
1985
1986 return 0;
1987}
1988
/* rfkill core callbacks for HCI controllers */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1992
ab81cbf9
JH
/* Deferred power-on work: open the device, re-check the error
 * conditions that were deliberately ignored during setup, arm the
 * auto-off timer if requested and announce the new index to mgmt
 * once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless something keeps
		 * the device in use before the timeout expires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2024
2025static void hci_power_off(struct work_struct *work)
2026{
3243553f 2027 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2028 power_off.work);
ab81cbf9
JH
2029
2030 BT_DBG("%s", hdev->name);
2031
8ee56540 2032 hci_dev_do_close(hdev);
ab81cbf9
JH
2033}
2034
16ab91ab
JH
2035static void hci_discov_off(struct work_struct *work)
2036{
2037 struct hci_dev *hdev;
16ab91ab
JH
2038
2039 hdev = container_of(work, struct hci_dev, discov_off.work);
2040
2041 BT_DBG("%s", hdev->name);
2042
d1967ff8 2043 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2044}
2045
2aeb9a1a
JH
2046int hci_uuids_clear(struct hci_dev *hdev)
2047{
4821002c 2048 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2049
4821002c
JH
2050 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2051 list_del(&uuid->list);
2aeb9a1a
JH
2052 kfree(uuid);
2053 }
2054
2055 return 0;
2056}
2057
55ed8ca1
JH
2058int hci_link_keys_clear(struct hci_dev *hdev)
2059{
2060 struct list_head *p, *n;
2061
2062 list_for_each_safe(p, n, &hdev->link_keys) {
2063 struct link_key *key;
2064
2065 key = list_entry(p, struct link_key, list);
2066
2067 list_del(p);
2068 kfree(key);
2069 }
2070
2071 return 0;
2072}
2073
b899efaf
VCG
2074int hci_smp_ltks_clear(struct hci_dev *hdev)
2075{
2076 struct smp_ltk *k, *tmp;
2077
2078 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2079 list_del(&k->list);
2080 kfree(k);
2081 }
2082
2083 return 0;
2084}
2085
55ed8ca1
JH
2086struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2087{
8035ded4 2088 struct link_key *k;
55ed8ca1 2089
8035ded4 2090 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2091 if (bacmp(bdaddr, &k->bdaddr) == 0)
2092 return k;
55ed8ca1
JH
2093
2094 return NULL;
2095}
2096
/* Decide whether a link key should be stored persistently, based on
 * its type and the bonding requirements both sides negotiated.
 * Returns true when the key should survive the connection. Note the
 * check order is significant: type-based rules come before the
 * connection-based ones.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2132
c9839a11 2133struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 2134{
c9839a11 2135 struct smp_ltk *k;
75d262c2 2136
c9839a11
VCG
2137 list_for_each_entry(k, &hdev->long_term_keys, list) {
2138 if (k->ediv != ediv ||
a8c5fb1a 2139 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2140 continue;
2141
c9839a11 2142 return k;
75d262c2
VCG
2143 }
2144
2145 return NULL;
2146}
75d262c2 2147
c9839a11 2148struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 2149 u8 addr_type)
75d262c2 2150{
c9839a11 2151 struct smp_ltk *k;
75d262c2 2152
c9839a11
VCG
2153 list_for_each_entry(k, &hdev->long_term_keys, list)
2154 if (addr_type == k->bdaddr_type &&
a8c5fb1a 2155 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
2156 return k;
2157
2158 return NULL;
2159}
75d262c2 2160
/* Store (or update) a BR/EDR link key for @bdaddr. When @new_key is
 * set, mgmt is notified and the key's persistence is decided via
 * hci_persistent_key(). Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the original stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2213
c9839a11 2214int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2215 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2216 ediv, u8 rand[8])
75d262c2 2217{
c9839a11 2218 struct smp_ltk *key, *old_key;
75d262c2 2219
c9839a11
VCG
2220 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2221 return 0;
75d262c2 2222
c9839a11
VCG
2223 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2224 if (old_key)
75d262c2 2225 key = old_key;
c9839a11
VCG
2226 else {
2227 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2228 if (!key)
2229 return -ENOMEM;
c9839a11 2230 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2231 }
2232
75d262c2 2233 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2234 key->bdaddr_type = addr_type;
2235 memcpy(key->val, tk, sizeof(key->val));
2236 key->authenticated = authenticated;
2237 key->ediv = ediv;
2238 key->enc_size = enc_size;
2239 key->type = type;
2240 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2241
c9839a11
VCG
2242 if (!new_key)
2243 return 0;
75d262c2 2244
261cc5aa
VCG
2245 if (type & HCI_SMP_LTK)
2246 mgmt_new_ltk(hdev, key, 1);
2247
75d262c2
VCG
2248 return 0;
2249}
2250
55ed8ca1
JH
2251int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2252{
2253 struct link_key *key;
2254
2255 key = hci_find_link_key(hdev, bdaddr);
2256 if (!key)
2257 return -ENOENT;
2258
6ed93dc6 2259 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2260
2261 list_del(&key->list);
2262 kfree(key);
2263
2264 return 0;
2265}
2266
b899efaf
VCG
2267int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2268{
2269 struct smp_ltk *k, *tmp;
2270
2271 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2272 if (bacmp(bdaddr, &k->bdaddr))
2273 continue;
2274
6ed93dc6 2275 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2276
2277 list_del(&k->list);
2278 kfree(k);
2279 }
2280
2281 return 0;
2282}
2283
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	/* Log which opcode timed out, if the sent command is still around */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one new command to go out and kick the cmd worker so the
	 * queue does not stall forever on an unanswered command. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2301
2763eda6 2302struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2303 bdaddr_t *bdaddr)
2763eda6
SJ
2304{
2305 struct oob_data *data;
2306
2307 list_for_each_entry(data, &hdev->remote_oob_data, list)
2308 if (bacmp(bdaddr, &data->bdaddr) == 0)
2309 return data;
2310
2311 return NULL;
2312}
2313
2314int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2315{
2316 struct oob_data *data;
2317
2318 data = hci_find_remote_oob_data(hdev, bdaddr);
2319 if (!data)
2320 return -ENOENT;
2321
6ed93dc6 2322 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2323
2324 list_del(&data->list);
2325 kfree(data);
2326
2327 return 0;
2328}
2329
2330int hci_remote_oob_data_clear(struct hci_dev *hdev)
2331{
2332 struct oob_data *data, *n;
2333
2334 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2335 list_del(&data->list);
2336 kfree(data);
2337 }
2338
2339 return 0;
2340}
2341
2342int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2343 u8 *randomizer)
2763eda6
SJ
2344{
2345 struct oob_data *data;
2346
2347 data = hci_find_remote_oob_data(hdev, bdaddr);
2348
2349 if (!data) {
2350 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2351 if (!data)
2352 return -ENOMEM;
2353
2354 bacpy(&data->bdaddr, bdaddr);
2355 list_add(&data->list, &hdev->remote_oob_data);
2356 }
2357
2358 memcpy(data->hash, hash, sizeof(data->hash));
2359 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2360
6ed93dc6 2361 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2362
2363 return 0;
2364}
2365
b9ee0a78
MH
2366struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2367 bdaddr_t *bdaddr, u8 type)
b2a66aad 2368{
8035ded4 2369 struct bdaddr_list *b;
b2a66aad 2370
b9ee0a78
MH
2371 list_for_each_entry(b, &hdev->blacklist, list) {
2372 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2373 return b;
b9ee0a78 2374 }
b2a66aad
AJ
2375
2376 return NULL;
2377}
2378
2379int hci_blacklist_clear(struct hci_dev *hdev)
2380{
2381 struct list_head *p, *n;
2382
2383 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2384 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2385
2386 list_del(p);
2387 kfree(b);
2388 }
2389
2390 return 0;
2391}
2392
88c1fe4b 2393int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2394{
2395 struct bdaddr_list *entry;
b2a66aad 2396
b9ee0a78 2397 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2398 return -EBADF;
2399
b9ee0a78 2400 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2401 return -EEXIST;
b2a66aad
AJ
2402
2403 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2404 if (!entry)
2405 return -ENOMEM;
b2a66aad
AJ
2406
2407 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2408 entry->bdaddr_type = type;
b2a66aad
AJ
2409
2410 list_add(&entry->list, &hdev->blacklist);
2411
88c1fe4b 2412 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2413}
2414
88c1fe4b 2415int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2416{
2417 struct bdaddr_list *entry;
b2a66aad 2418
b9ee0a78 2419 if (!bacmp(bdaddr, BDADDR_ANY))
5e762444 2420 return hci_blacklist_clear(hdev);
b2a66aad 2421
b9ee0a78 2422 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2423 if (!entry)
5e762444 2424 return -ENOENT;
b2a66aad
AJ
2425
2426 list_del(&entry->list);
2427 kfree(entry);
2428
88c1fe4b 2429 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2430}
2431
4c87eaab 2432static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2433{
4c87eaab
AG
2434 if (status) {
2435 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2436
4c87eaab
AG
2437 hci_dev_lock(hdev);
2438 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2439 hci_dev_unlock(hdev);
2440 return;
2441 }
7ba8b4be
AG
2442}
2443
4c87eaab 2444static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2445{
4c87eaab
AG
2446 /* General inquiry access code (GIAC) */
2447 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2448 struct hci_request req;
2449 struct hci_cp_inquiry cp;
7ba8b4be
AG
2450 int err;
2451
4c87eaab
AG
2452 if (status) {
2453 BT_ERR("Failed to disable LE scanning: status %d", status);
2454 return;
2455 }
7ba8b4be 2456
4c87eaab
AG
2457 switch (hdev->discovery.type) {
2458 case DISCOV_TYPE_LE:
2459 hci_dev_lock(hdev);
2460 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2461 hci_dev_unlock(hdev);
2462 break;
7ba8b4be 2463
4c87eaab
AG
2464 case DISCOV_TYPE_INTERLEAVED:
2465 hci_req_init(&req, hdev);
7ba8b4be 2466
4c87eaab
AG
2467 memset(&cp, 0, sizeof(cp));
2468 memcpy(&cp.lap, lap, sizeof(cp.lap));
2469 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2470 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2471
4c87eaab 2472 hci_dev_lock(hdev);
7dbfac1d 2473
4c87eaab 2474 hci_inquiry_cache_flush(hdev);
7dbfac1d 2475
4c87eaab
AG
2476 err = hci_req_run(&req, inquiry_complete);
2477 if (err) {
2478 BT_ERR("Inquiry request failed: err %d", err);
2479 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2480 }
7dbfac1d 2481
4c87eaab
AG
2482 hci_dev_unlock(hdev);
2483 break;
7dbfac1d 2484 }
7dbfac1d
AG
2485}
2486
7ba8b4be
AG
/* Delayed work that turns off LE scanning; completion handling (state
 * transitions, chained inquiry) happens in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2507
9be0dab7
DH
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline controller defaults until real values are read
	 * from the hardware during setup. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2565
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2573
1da177e4
LT
2574/* Register HCI device */
2575int hci_register_dev(struct hci_dev *hdev)
2576{
b1b813d4 2577 int id, error;
1da177e4 2578
010666a1 2579 if (!hdev->open || !hdev->close)
1da177e4
LT
2580 return -EINVAL;
2581
08add513
MM
2582 /* Do not allow HCI_AMP devices to register at index 0,
2583 * so the index can be used as the AMP controller ID.
2584 */
3df92b31
SL
2585 switch (hdev->dev_type) {
2586 case HCI_BREDR:
2587 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2588 break;
2589 case HCI_AMP:
2590 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2591 break;
2592 default:
2593 return -EINVAL;
1da177e4 2594 }
8e87d142 2595
3df92b31
SL
2596 if (id < 0)
2597 return id;
2598
1da177e4
LT
2599 sprintf(hdev->name, "hci%d", id);
2600 hdev->id = id;
2d8b3a11
AE
2601
2602 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2603
d8537548
KC
2604 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2605 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2606 if (!hdev->workqueue) {
2607 error = -ENOMEM;
2608 goto err;
2609 }
f48fd9c8 2610
d8537548
KC
2611 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2612 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2613 if (!hdev->req_workqueue) {
2614 destroy_workqueue(hdev->workqueue);
2615 error = -ENOMEM;
2616 goto err;
2617 }
2618
0153e2ec
MH
2619 if (!IS_ERR_OR_NULL(bt_debugfs))
2620 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2621
bdc3e0f1
MH
2622 dev_set_name(&hdev->dev, "%s", hdev->name);
2623
2624 error = device_add(&hdev->dev);
33ca954d
DH
2625 if (error < 0)
2626 goto err_wqueue;
1da177e4 2627
611b30f7 2628 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2629 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2630 hdev);
611b30f7
MH
2631 if (hdev->rfkill) {
2632 if (rfkill_register(hdev->rfkill) < 0) {
2633 rfkill_destroy(hdev->rfkill);
2634 hdev->rfkill = NULL;
2635 }
2636 }
2637
5e130367
JH
2638 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2639 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2640
a8b2d5c2 2641 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 2642 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 2643
01cd3404 2644 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
2645 /* Assume BR/EDR support until proven otherwise (such as
2646 * through reading supported features during init.
2647 */
2648 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2649 }
ce2be9ac 2650
fcee3377
GP
2651 write_lock(&hci_dev_list_lock);
2652 list_add(&hdev->list, &hci_dev_list);
2653 write_unlock(&hci_dev_list_lock);
2654
1da177e4 2655 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2656 hci_dev_hold(hdev);
1da177e4 2657
19202573 2658 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2659
1da177e4 2660 return id;
f48fd9c8 2661
33ca954d
DH
2662err_wqueue:
2663 destroy_workqueue(hdev->workqueue);
6ead1bbc 2664 destroy_workqueue(hdev->req_workqueue);
33ca954d 2665err:
3df92b31 2666 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 2667
33ca954d 2668 return error;
1da177e4
LT
2669}
2670EXPORT_SYMBOL(hci_register_dev);
2671
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Block further mgmt/ioctl activity while tearing down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only tell mgmt about the removal if the device finished setup
	 * and is not in the middle of init. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all persistent state (keys, blacklist, OOB data) */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2732
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2740
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2748
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Frames are only accepted while the device is up or still
	 * initializing; otherwise the skb is consumed and dropped. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the RX worker */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2770
33e882a5 2771static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2772 int count, __u8 index)
33e882a5
SS
2773{
2774 int len = 0;
2775 int hlen = 0;
2776 int remain = count;
2777 struct sk_buff *skb;
2778 struct bt_skb_cb *scb;
2779
2780 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2781 index >= NUM_REASSEMBLY)
33e882a5
SS
2782 return -EILSEQ;
2783
2784 skb = hdev->reassembly[index];
2785
2786 if (!skb) {
2787 switch (type) {
2788 case HCI_ACLDATA_PKT:
2789 len = HCI_MAX_FRAME_SIZE;
2790 hlen = HCI_ACL_HDR_SIZE;
2791 break;
2792 case HCI_EVENT_PKT:
2793 len = HCI_MAX_EVENT_SIZE;
2794 hlen = HCI_EVENT_HDR_SIZE;
2795 break;
2796 case HCI_SCODATA_PKT:
2797 len = HCI_MAX_SCO_SIZE;
2798 hlen = HCI_SCO_HDR_SIZE;
2799 break;
2800 }
2801
1e429f38 2802 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2803 if (!skb)
2804 return -ENOMEM;
2805
2806 scb = (void *) skb->cb;
2807 scb->expect = hlen;
2808 scb->pkt_type = type;
2809
33e882a5
SS
2810 hdev->reassembly[index] = skb;
2811 }
2812
2813 while (count) {
2814 scb = (void *) skb->cb;
89bb46d0 2815 len = min_t(uint, scb->expect, count);
33e882a5
SS
2816
2817 memcpy(skb_put(skb, len), data, len);
2818
2819 count -= len;
2820 data += len;
2821 scb->expect -= len;
2822 remain = count;
2823
2824 switch (type) {
2825 case HCI_EVENT_PKT:
2826 if (skb->len == HCI_EVENT_HDR_SIZE) {
2827 struct hci_event_hdr *h = hci_event_hdr(skb);
2828 scb->expect = h->plen;
2829
2830 if (skb_tailroom(skb) < scb->expect) {
2831 kfree_skb(skb);
2832 hdev->reassembly[index] = NULL;
2833 return -ENOMEM;
2834 }
2835 }
2836 break;
2837
2838 case HCI_ACLDATA_PKT:
2839 if (skb->len == HCI_ACL_HDR_SIZE) {
2840 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2841 scb->expect = __le16_to_cpu(h->dlen);
2842
2843 if (skb_tailroom(skb) < scb->expect) {
2844 kfree_skb(skb);
2845 hdev->reassembly[index] = NULL;
2846 return -ENOMEM;
2847 }
2848 }
2849 break;
2850
2851 case HCI_SCODATA_PKT:
2852 if (skb->len == HCI_SCO_HDR_SIZE) {
2853 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2854 scb->expect = h->dlen;
2855
2856 if (skb_tailroom(skb) < scb->expect) {
2857 kfree_skb(skb);
2858 hdev->reassembly[index] = NULL;
2859 return -ENOMEM;
2860 }
2861 }
2862 break;
2863 }
2864
2865 if (scb->expect == 0) {
2866 /* Complete frame */
2867
2868 bt_cb(skb)->pkt_type = type;
e1a26170 2869 hci_recv_frame(hdev, skb);
33e882a5
SS
2870
2871 hdev->reassembly[index] = NULL;
2872 return remain;
2873 }
2874 }
2875
2876 return remain;
2877}
2878
ef222013
MH
/* Feed a driver fragment of a typed HCI packet into reassembly.
 *
 * Keeps calling hci_reassembly() (slot = type - 1) until all @count
 * bytes are consumed or an error occurs. Returns the number of
 * leftover bytes (0 when fully consumed) or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2898
99811510
SS
/* Dedicated reassembly slot for untyped byte streams (e.g. H4 UART) */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into reassembly.
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * the first byte of each new frame carries the type indicator, which
 * is stripped here before handing the rest to hci_reassembly().
 * Returns leftover byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2933
1da177e4
LT
2934/* ---- Interface to upper protocols ---- */
2935
1da177e4
LT
/* Register an upper-protocol callback structure. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2947
/* Unregister an upper-protocol callback structure. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2959
/* Hand a fully-built frame to the transport driver.
 *
 * Copies go to the monitor interface and (in promiscuous mode) raw
 * sockets before the skb is passed to hdev->send(). Send errors are
 * only logged; the caller gets no status.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2981
3119ae95
JH
2982void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2983{
2984 skb_queue_head_init(&req->cmd_q);
2985 req->hdev = hdev;
5d73e034 2986 req->err = 0;
3119ae95
JH
2987}
2988
/* Submit a built HCI request for execution.
 *
 * The @complete callback is attached to the last queued command so it
 * fires once the whole sequence has finished. Returns 0 on success,
 * the recorded build error if one occurred, or -ENODATA for an empty
 * request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion is signalled from the final command's event */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request stays contiguous in cmd_q */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3020
1ca3a9d0 3021static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3022 u32 plen, const void *param)
1da177e4
LT
3023{
3024 int len = HCI_COMMAND_HDR_SIZE + plen;
3025 struct hci_command_hdr *hdr;
3026 struct sk_buff *skb;
3027
1da177e4 3028 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3029 if (!skb)
3030 return NULL;
1da177e4
LT
3031
3032 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3033 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3034 hdr->plen = plen;
3035
3036 if (plen)
3037 memcpy(skb_put(skb, plen), param, plen);
3038
3039 BT_DBG("skb len %d", skb->len);
3040
0d48d939 3041 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3042
1ca3a9d0
JH
3043 return skb;
3044}
3045
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flaged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue it and let the cmd worker push it to the controller */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 3070
71c76a17 3071/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3072void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3073 const void *param, u8 event)
71c76a17
JH
3074{
3075 struct hci_dev *hdev = req->hdev;
3076 struct sk_buff *skb;
3077
3078 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3079
34739c1e
AG
3080 /* If an error occured during request building, there is no point in
3081 * queueing the HCI command. We can simply return.
3082 */
3083 if (req->err)
3084 return;
3085
71c76a17
JH
3086 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3087 if (!skb) {
5d73e034
AG
3088 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3089 hdev->name, opcode);
3090 req->err = -ENOMEM;
e348fe6b 3091 return;
71c76a17
JH
3092 }
3093
3094 if (skb_queue_empty(&req->cmd_q))
3095 bt_cb(skb)->req.start = true;
3096
02350a72
JH
3097 bt_cb(skb)->req.event = event;
3098
71c76a17 3099 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3100}
3101
07dc93dd
JH
/* Queue a command to a request, waiting for the default completion event. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3107
1da177e4 3108/* Get data from the previously sent command */
a9de9248 3109void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3110{
3111 struct hci_command_hdr *hdr;
3112
3113 if (!hdev->sent_cmd)
3114 return NULL;
3115
3116 hdr = (void *) hdev->sent_cmd->data;
3117
a9de9248 3118 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3119 return NULL;
3120
f0e09510 3121 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3122
3123 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3124}
3125
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3138
ee22be7e 3139static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3140 struct sk_buff *skb, __u16 flags)
1da177e4 3141{
ee22be7e 3142 struct hci_conn *conn = chan->conn;
1da177e4
LT
3143 struct hci_dev *hdev = conn->hdev;
3144 struct sk_buff *list;
3145
087bfd99
GP
3146 skb->len = skb_headlen(skb);
3147 skb->data_len = 0;
3148
3149 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3150
3151 switch (hdev->dev_type) {
3152 case HCI_BREDR:
3153 hci_add_acl_hdr(skb, conn->handle, flags);
3154 break;
3155 case HCI_AMP:
3156 hci_add_acl_hdr(skb, chan->handle, flags);
3157 break;
3158 default:
3159 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3160 return;
3161 }
087bfd99 3162
70f23020
AE
3163 list = skb_shinfo(skb)->frag_list;
3164 if (!list) {
1da177e4
LT
3165 /* Non fragmented */
3166 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3167
73d80deb 3168 skb_queue_tail(queue, skb);
1da177e4
LT
3169 } else {
3170 /* Fragmented */
3171 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3172
3173 skb_shinfo(skb)->frag_list = NULL;
3174
3175 /* Queue all fragments atomically */
af3e6359 3176 spin_lock(&queue->lock);
1da177e4 3177
73d80deb 3178 __skb_queue_tail(queue, skb);
e702112f
AE
3179
3180 flags &= ~ACL_START;
3181 flags |= ACL_CONT;
1da177e4
LT
3182 do {
3183 skb = list; list = list->next;
8e87d142 3184
0d48d939 3185 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3186 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3187
3188 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3189
73d80deb 3190 __skb_queue_tail(queue, skb);
1da177e4
LT
3191 } while (list);
3192
af3e6359 3193 spin_unlock(&queue->lock);
1da177e4 3194 }
73d80deb
LAD
3195}
3196
/* Queue ACL data on the channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header in front of the payload */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue and let the TX worker push it out */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets, and compute its fair share (*quote) of the
 * controller's free buffer credits. *quote is 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins (simple fairness) */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split credits evenly; guarantee at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

/* Kill connections of @type whose packets have gone unacknowledged
 * long enough to trigger the link TX timeout.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3312
6039aa73
GP
/* Channel-level scheduler for links of @type: only channels whose head
 * packet carries the highest queued priority are candidates; among
 * those, pick the channel on the connection with the fewest packets in
 * flight. *quote receives the per-round packet budget for the winner.
 * Returns NULL when no channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Lower-priority heads are out of the running */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority resets the contest */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness tie-break: fewest packets in flight.
			 * NOTE(review): compares conn->sent, not tmp->sent —
			 * looks intentional upstream (per-connection load),
			 * confirm before changing. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Budget comes from the controller buffer pool of the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even split among same-priority channels; at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3394
02b20f0b
LAD
/* Post-round priority rebalancing for links of @type: channels that
 * transmitted this round get their per-round counter cleared, while
 * starved channels (queued data, nothing sent) have their head packet
 * promoted to HCI_PRIO_MAX - 1 so they win the next hci_chan_sent()
 * selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Sent this round: reset counter, no promotion */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3444
b71d385a
AE
3445static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3446{
3447 /* Calculate count of blocks used by this packet */
3448 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3449}
3450
6039aa73 3451static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3452{
1da177e4
LT
3453 if (!test_bit(HCI_RAW, &hdev->flags)) {
3454 /* ACL tx timeout must be longer than maximum
3455 * link supervision timeout (40.9 seconds) */
63d2bc1b 3456 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3457 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3458 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3459 }
63d2bc1b 3460}
1da177e4 3461
6039aa73 3462static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3463{
3464 unsigned int cnt = hdev->acl_cnt;
3465 struct hci_chan *chan;
3466 struct sk_buff *skb;
3467 int quote;
3468
3469 __check_timeout(hdev, cnt);
04837f64 3470
73d80deb 3471 while (hdev->acl_cnt &&
a8c5fb1a 3472 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3473 u32 priority = (skb_peek(&chan->data_q))->priority;
3474 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3475 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3476 skb->len, skb->priority);
73d80deb 3477
ec1cce24
LAD
3478 /* Stop if priority has changed */
3479 if (skb->priority < priority)
3480 break;
3481
3482 skb = skb_dequeue(&chan->data_q);
3483
73d80deb 3484 hci_conn_enter_active_mode(chan->conn,
04124681 3485 bt_cb(skb)->force_active);
04837f64 3486
57d17d70 3487 hci_send_frame(hdev, skb);
1da177e4
LT
3488 hdev->acl_last_tx = jiffies;
3489
3490 hdev->acl_cnt--;
73d80deb
LAD
3491 chan->sent++;
3492 chan->conn->sent++;
1da177e4
LT
3493 }
3494 }
02b20f0b
LAD
3495
3496 if (cnt != hdev->acl_cnt)
3497 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3498}
3499
6039aa73 3500static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3501{
63d2bc1b 3502 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3503 struct hci_chan *chan;
3504 struct sk_buff *skb;
3505 int quote;
bd1eb66b 3506 u8 type;
b71d385a 3507
63d2bc1b 3508 __check_timeout(hdev, cnt);
b71d385a 3509
bd1eb66b
AE
3510 BT_DBG("%s", hdev->name);
3511
3512 if (hdev->dev_type == HCI_AMP)
3513 type = AMP_LINK;
3514 else
3515 type = ACL_LINK;
3516
b71d385a 3517 while (hdev->block_cnt > 0 &&
bd1eb66b 3518 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3519 u32 priority = (skb_peek(&chan->data_q))->priority;
3520 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3521 int blocks;
3522
3523 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3524 skb->len, skb->priority);
b71d385a
AE
3525
3526 /* Stop if priority has changed */
3527 if (skb->priority < priority)
3528 break;
3529
3530 skb = skb_dequeue(&chan->data_q);
3531
3532 blocks = __get_blocks(hdev, skb);
3533 if (blocks > hdev->block_cnt)
3534 return;
3535
3536 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3537 bt_cb(skb)->force_active);
b71d385a 3538
57d17d70 3539 hci_send_frame(hdev, skb);
b71d385a
AE
3540 hdev->acl_last_tx = jiffies;
3541
3542 hdev->block_cnt -= blocks;
3543 quote -= blocks;
3544
3545 chan->sent += blocks;
3546 chan->conn->sent += blocks;
3547 }
3548 }
3549
3550 if (cnt != hdev->block_cnt)
bd1eb66b 3551 hci_prio_recalculate(hdev, type);
b71d385a
AE
3552}
3553
6039aa73 3554static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3555{
3556 BT_DBG("%s", hdev->name);
3557
bd1eb66b
AE
3558 /* No ACL link over BR/EDR controller */
3559 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3560 return;
3561
3562 /* No AMP link over AMP controller */
3563 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3564 return;
3565
3566 switch (hdev->flow_ctl_mode) {
3567 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3568 hci_sched_acl_pkt(hdev);
3569 break;
3570
3571 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3572 hci_sched_acl_blk(hdev);
3573 break;
3574 }
3575}
3576
1da177e4 3577/* Schedule SCO */
6039aa73 3578static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3579{
3580 struct hci_conn *conn;
3581 struct sk_buff *skb;
3582 int quote;
3583
3584 BT_DBG("%s", hdev->name);
3585
52087a79
LAD
3586 if (!hci_conn_num(hdev, SCO_LINK))
3587 return;
3588
1da177e4
LT
3589 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3590 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3591 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3592 hci_send_frame(hdev, skb);
1da177e4
LT
3593
3594 conn->sent++;
3595 if (conn->sent == ~0)
3596 conn->sent = 0;
3597 }
3598 }
3599}
3600
6039aa73 3601static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3602{
3603 struct hci_conn *conn;
3604 struct sk_buff *skb;
3605 int quote;
3606
3607 BT_DBG("%s", hdev->name);
3608
52087a79
LAD
3609 if (!hci_conn_num(hdev, ESCO_LINK))
3610 return;
3611
8fc9ced3
GP
3612 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3613 &quote))) {
b6a0dc82
MH
3614 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3615 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3616 hci_send_frame(hdev, skb);
b6a0dc82
MH
3617
3618 conn->sent++;
3619 if (conn->sent == ~0)
3620 conn->sent = 0;
3621 }
3622 }
3623}
3624
6039aa73 3625static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3626{
73d80deb 3627 struct hci_chan *chan;
6ed58ec5 3628 struct sk_buff *skb;
02b20f0b 3629 int quote, cnt, tmp;
6ed58ec5
VT
3630
3631 BT_DBG("%s", hdev->name);
3632
52087a79
LAD
3633 if (!hci_conn_num(hdev, LE_LINK))
3634 return;
3635
6ed58ec5
VT
3636 if (!test_bit(HCI_RAW, &hdev->flags)) {
3637 /* LE tx timeout must be longer than maximum
3638 * link supervision timeout (40.9 seconds) */
bae1f5d9 3639 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3640 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3641 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3642 }
3643
3644 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3645 tmp = cnt;
73d80deb 3646 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3647 u32 priority = (skb_peek(&chan->data_q))->priority;
3648 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3649 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3650 skb->len, skb->priority);
6ed58ec5 3651
ec1cce24
LAD
3652 /* Stop if priority has changed */
3653 if (skb->priority < priority)
3654 break;
3655
3656 skb = skb_dequeue(&chan->data_q);
3657
57d17d70 3658 hci_send_frame(hdev, skb);
6ed58ec5
VT
3659 hdev->le_last_tx = jiffies;
3660
3661 cnt--;
73d80deb
LAD
3662 chan->sent++;
3663 chan->conn->sent++;
6ed58ec5
VT
3664 }
3665 }
73d80deb 3666
6ed58ec5
VT
3667 if (hdev->le_pkts)
3668 hdev->le_cnt = cnt;
3669 else
3670 hdev->acl_cnt = cnt;
02b20f0b
LAD
3671
3672 if (cnt != tmp)
3673 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3674}
3675
3eff45ea 3676static void hci_tx_work(struct work_struct *work)
1da177e4 3677{
3eff45ea 3678 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3679 struct sk_buff *skb;
3680
6ed58ec5 3681 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3682 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3683
52de599e
MH
3684 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3685 /* Schedule queues and send stuff to HCI driver */
3686 hci_sched_acl(hdev);
3687 hci_sched_sco(hdev);
3688 hci_sched_esco(hdev);
3689 hci_sched_le(hdev);
3690 }
6ed58ec5 3691
1da177e4
LT
3692 /* Send next queued raw (unknown type) packet */
3693 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 3694 hci_send_frame(hdev, skb);
1da177e4
LT
3695}
3696
25985edc 3697/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3698
3699/* ACL data packet */
6039aa73 3700static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3701{
3702 struct hci_acl_hdr *hdr = (void *) skb->data;
3703 struct hci_conn *conn;
3704 __u16 handle, flags;
3705
3706 skb_pull(skb, HCI_ACL_HDR_SIZE);
3707
3708 handle = __le16_to_cpu(hdr->handle);
3709 flags = hci_flags(handle);
3710 handle = hci_handle(handle);
3711
f0e09510 3712 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3713 handle, flags);
1da177e4
LT
3714
3715 hdev->stat.acl_rx++;
3716
3717 hci_dev_lock(hdev);
3718 conn = hci_conn_hash_lookup_handle(hdev, handle);
3719 hci_dev_unlock(hdev);
8e87d142 3720
1da177e4 3721 if (conn) {
65983fc7 3722 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3723
1da177e4 3724 /* Send to upper protocol */
686ebf28
UF
3725 l2cap_recv_acldata(conn, skb, flags);
3726 return;
1da177e4 3727 } else {
8e87d142 3728 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3729 hdev->name, handle);
1da177e4
LT
3730 }
3731
3732 kfree_skb(skb);
3733}
3734
3735/* SCO data packet */
6039aa73 3736static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3737{
3738 struct hci_sco_hdr *hdr = (void *) skb->data;
3739 struct hci_conn *conn;
3740 __u16 handle;
3741
3742 skb_pull(skb, HCI_SCO_HDR_SIZE);
3743
3744 handle = __le16_to_cpu(hdr->handle);
3745
f0e09510 3746 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3747
3748 hdev->stat.sco_rx++;
3749
3750 hci_dev_lock(hdev);
3751 conn = hci_conn_hash_lookup_handle(hdev, handle);
3752 hci_dev_unlock(hdev);
3753
3754 if (conn) {
1da177e4 3755 /* Send to upper protocol */
686ebf28
UF
3756 sco_recv_scodata(conn, skb);
3757 return;
1da177e4 3758 } else {
8e87d142 3759 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3760 hdev->name, handle);
1da177e4
LT
3761 }
3762
3763 kfree_skb(skb);
3764}
3765
9238f36a
JH
3766static bool hci_req_is_complete(struct hci_dev *hdev)
3767{
3768 struct sk_buff *skb;
3769
3770 skb = skb_peek(&hdev->cmd_q);
3771 if (!skb)
3772 return true;
3773
3774 return bt_cb(skb)->req.start;
3775}
3776
42c6b129
JH
3777static void hci_resend_last(struct hci_dev *hdev)
3778{
3779 struct hci_command_hdr *sent;
3780 struct sk_buff *skb;
3781 u16 opcode;
3782
3783 if (!hdev->sent_cmd)
3784 return;
3785
3786 sent = (void *) hdev->sent_cmd->data;
3787 opcode = __le16_to_cpu(sent->opcode);
3788 if (opcode == HCI_OP_RESET)
3789 return;
3790
3791 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3792 if (!skb)
3793 return;
3794
3795 skb_queue_head(&hdev->cmd_q, skb);
3796 queue_work(hdev->workqueue, &hdev->cmd_work);
3797}
3798
9238f36a
JH
/* Handle completion of the command @opcode with @status: decide whether
 * the request it belongs to is now finished and, if so, invoke the
 * request's completion callback exactly once and discard any remaining
 * queued commands of that request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Head of the next request reached: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3864
b78752cc 3865static void hci_rx_work(struct work_struct *work)
1da177e4 3866{
b78752cc 3867 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3868 struct sk_buff *skb;
3869
3870 BT_DBG("%s", hdev->name);
3871
1da177e4 3872 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3873 /* Send copy to monitor */
3874 hci_send_to_monitor(hdev, skb);
3875
1da177e4
LT
3876 if (atomic_read(&hdev->promisc)) {
3877 /* Send copy to the sockets */
470fe1b5 3878 hci_send_to_sock(hdev, skb);
1da177e4
LT
3879 }
3880
0736cfa8
MH
3881 if (test_bit(HCI_RAW, &hdev->flags) ||
3882 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3883 kfree_skb(skb);
3884 continue;
3885 }
3886
3887 if (test_bit(HCI_INIT, &hdev->flags)) {
3888 /* Don't process data packets in this states. */
0d48d939 3889 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3890 case HCI_ACLDATA_PKT:
3891 case HCI_SCODATA_PKT:
3892 kfree_skb(skb);
3893 continue;
3ff50b79 3894 }
1da177e4
LT
3895 }
3896
3897 /* Process frame */
0d48d939 3898 switch (bt_cb(skb)->pkt_type) {
1da177e4 3899 case HCI_EVENT_PKT:
b78752cc 3900 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3901 hci_event_packet(hdev, skb);
3902 break;
3903
3904 case HCI_ACLDATA_PKT:
3905 BT_DBG("%s ACL data packet", hdev->name);
3906 hci_acldata_packet(hdev, skb);
3907 break;
3908
3909 case HCI_SCODATA_PKT:
3910 BT_DBG("%s SCO data packet", hdev->name);
3911 hci_scodata_packet(hdev, skb);
3912 break;
3913
3914 default:
3915 kfree_skb(skb);
3916 break;
3917 }
3918 }
1da177e4
LT
3919}
3920
c347b765 3921static void hci_cmd_work(struct work_struct *work)
1da177e4 3922{
c347b765 3923 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3924 struct sk_buff *skb;
3925
2104786b
AE
3926 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3927 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3928
1da177e4 3929 /* Send queued commands */
5a08ecce
AE
3930 if (atomic_read(&hdev->cmd_cnt)) {
3931 skb = skb_dequeue(&hdev->cmd_q);
3932 if (!skb)
3933 return;
3934
7585b97a 3935 kfree_skb(hdev->sent_cmd);
1da177e4 3936
a675d7f1 3937 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3938 if (hdev->sent_cmd) {
1da177e4 3939 atomic_dec(&hdev->cmd_cnt);
57d17d70 3940 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
3941 if (test_bit(HCI_RESET, &hdev->flags))
3942 del_timer(&hdev->cmd_timer);
3943 else
3944 mod_timer(&hdev->cmd_timer,
5f246e89 3945 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3946 } else {
3947 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3948 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3949 }
3950 }
3951}
This page took 1.04222 seconds and 5 git commands to generate.