Bluetooth: Introduce hci_dev_clear_flag helper macro
net/bluetooth/hci_core.c
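
For reference, the hci_dev_set_flag(), hci_dev_clear_flag() and hci_dev_test_flag()
helpers used throughout this file wrap the atomic bit operations on hdev->dev_flags.
A minimal sketch of what they expand to, assuming the definitions this series adds to
include/net/bluetooth/hci_core.h (the header itself is authoritative):

#define hci_dev_set_flag(hdev, nr)     set_bit((nr), &(hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)   clear_bit((nr), &(hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)    test_bit((nr), &(hdev)->dev_flags)

With these helpers, an open-coded call such as clear_bit(HCI_LE_ENABLED, &hdev->dev_flags)
becomes hci_dev_clear_flag(hdev, HCI_LE_ENABLED).
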
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
64
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI debugfs entries ---- */
76
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
83 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91 {
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97 int err;
98
99 if (!test_bit(HCI_UP, &hdev->flags))
100 return -ENETDOWN;
101
102 if (copy_from_user(buf, user_buf, buf_size))
103 return -EFAULT;
104
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
107 return -EINVAL;
108
109 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
110 return -EALREADY;
111
112 hci_req_lock(hdev);
113 if (enable)
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 HCI_CMD_TIMEOUT);
116 else
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
118 HCI_CMD_TIMEOUT);
119 hci_req_unlock(hdev);
120
121 if (IS_ERR(skb))
122 return PTR_ERR(skb);
123
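/* The first byte of the Command Complete return parameters is the HCI status */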
124 err = -bt_to_errno(skb->data[0]);
125 kfree_skb(skb);
126
127 if (err < 0)
128 return err;
129
130 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
131
132 return count;
133 }
134
135 static const struct file_operations dut_mode_fops = {
136 .open = simple_open,
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
140 };
141
142 /* ---- HCI requests ---- */
143
144 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
145 {
146 BT_DBG("%s result 0x%2.2x", hdev->name, result);
147
148 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE;
151 wake_up_interruptible(&hdev->req_wait_q);
152 }
153 }
154
155 static void hci_req_cancel(struct hci_dev *hdev, int err)
156 {
157 BT_DBG("%s err 0x%2.2x", hdev->name, err);
158
159 if (hdev->req_status == HCI_REQ_PEND) {
160 hdev->req_result = err;
161 hdev->req_status = HCI_REQ_CANCELED;
162 wake_up_interruptible(&hdev->req_wait_q);
163 }
164 }
165
166 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
167 u8 event)
168 {
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
171 struct sk_buff *skb;
172
173 hci_dev_lock(hdev);
174
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
177
178 hci_dev_unlock(hdev);
179
180 if (!skb)
181 return ERR_PTR(-ENODATA);
182
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
185 goto failed;
186 }
187
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
190
191 if (event) {
192 if (hdr->evt != event)
193 goto failed;
194 return skb;
195 }
196
197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
199 goto failed;
200 }
201
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
204 goto failed;
205 }
206
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
209
210 if (opcode == __le16_to_cpu(ev->opcode))
211 return skb;
212
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
215
216 failed:
217 kfree_skb(skb);
218 return ERR_PTR(-ENODATA);
219 }
220
221 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
222 const void *param, u8 event, u32 timeout)
223 {
224 DECLARE_WAITQUEUE(wait, current);
225 struct hci_request req;
226 int err = 0;
227
228 BT_DBG("%s", hdev->name);
229
230 hci_req_init(&req, hdev);
231
232 hci_req_add_ev(&req, opcode, plen, param, event);
233
234 hdev->req_status = HCI_REQ_PEND;
235
236 add_wait_queue(&hdev->req_wait_q, &wait);
237 set_current_state(TASK_INTERRUPTIBLE);
238
239 err = hci_req_run(&req, hci_req_sync_complete);
240 if (err < 0) {
241 remove_wait_queue(&hdev->req_wait_q, &wait);
242 set_current_state(TASK_RUNNING);
243 return ERR_PTR(err);
244 }
245
246 schedule_timeout(timeout);
247
248 remove_wait_queue(&hdev->req_wait_q, &wait);
249
250 if (signal_pending(current))
251 return ERR_PTR(-EINTR);
252
253 switch (hdev->req_status) {
254 case HCI_REQ_DONE:
255 err = -bt_to_errno(hdev->req_result);
256 break;
257
258 case HCI_REQ_CANCELED:
259 err = -hdev->req_result;
260 break;
261
262 default:
263 err = -ETIMEDOUT;
264 break;
265 }
266
267 hdev->req_status = hdev->req_result = 0;
268
269 BT_DBG("%s end: err %d", hdev->name, err);
270
271 if (err < 0)
272 return ERR_PTR(err);
273
274 return hci_get_cmd_complete(hdev, opcode, event);
275 }
276 EXPORT_SYMBOL(__hci_cmd_sync_ev);
277
278 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
279 const void *param, u32 timeout)
280 {
281 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
282 }
283 EXPORT_SYMBOL(__hci_cmd_sync);
284
285 /* Execute request and wait for completion. */
286 static int __hci_req_sync(struct hci_dev *hdev,
287 void (*func)(struct hci_request *req,
288 unsigned long opt),
289 unsigned long opt, __u32 timeout)
290 {
291 struct hci_request req;
292 DECLARE_WAITQUEUE(wait, current);
293 int err = 0;
294
295 BT_DBG("%s start", hdev->name);
296
297 hci_req_init(&req, hdev);
298
299 hdev->req_status = HCI_REQ_PEND;
300
301 func(&req, opt);
302
303 add_wait_queue(&hdev->req_wait_q, &wait);
304 set_current_state(TASK_INTERRUPTIBLE);
305
306 err = hci_req_run(&req, hci_req_sync_complete);
307 if (err < 0) {
308 hdev->req_status = 0;
309
310 remove_wait_queue(&hdev->req_wait_q, &wait);
311 set_current_state(TASK_RUNNING);
312
313 /* ENODATA means the HCI request command queue is empty.
314 * This can happen when a request with conditionals doesn't
315 * trigger any commands to be sent. This is normal behavior
316 * and should not trigger an error return.
317 */
318 if (err == -ENODATA)
319 return 0;
320
321 return err;
322 }
323
324 schedule_timeout(timeout);
325
326 remove_wait_queue(&hdev->req_wait_q, &wait);
327
328 if (signal_pending(current))
329 return -EINTR;
330
331 switch (hdev->req_status) {
332 case HCI_REQ_DONE:
333 err = -bt_to_errno(hdev->req_result);
334 break;
335
336 case HCI_REQ_CANCELED:
337 err = -hdev->req_result;
338 break;
339
340 default:
341 err = -ETIMEDOUT;
342 break;
343 }
344
345 hdev->req_status = hdev->req_result = 0;
346
347 BT_DBG("%s end: err %d", hdev->name, err);
348
349 return err;
350 }
351
352 static int hci_req_sync(struct hci_dev *hdev,
353 void (*req)(struct hci_request *req,
354 unsigned long opt),
355 unsigned long opt, __u32 timeout)
356 {
357 int ret;
358
359 if (!test_bit(HCI_UP, &hdev->flags))
360 return -ENETDOWN;
361
362 /* Serialize all requests */
363 hci_req_lock(hdev);
364 ret = __hci_req_sync(hdev, req, opt, timeout);
365 hci_req_unlock(hdev);
366
367 return ret;
368 }
369
370 static void hci_reset_req(struct hci_request *req, unsigned long opt)
371 {
372 BT_DBG("%s %ld", req->hdev->name, opt);
373
374 /* Reset device */
375 set_bit(HCI_RESET, &req->hdev->flags);
376 hci_req_add(req, HCI_OP_RESET, 0, NULL);
377 }
378
379 static void bredr_init(struct hci_request *req)
380 {
381 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
382
383 /* Read Local Supported Features */
384 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
385
386 /* Read Local Version */
387 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
388
389 /* Read BD Address */
390 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
391 }
392
393 static void amp_init1(struct hci_request *req)
394 {
395 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
396
397 /* Read Local Version */
398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
399
400 /* Read Local Supported Commands */
401 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
402
403 /* Read Local AMP Info */
404 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
405
406 /* Read Data Blk size */
407 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
408
409 /* Read Flow Control Mode */
410 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
411
412 /* Read Location Data */
413 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
414 }
415
416 static void amp_init2(struct hci_request *req)
417 {
418 /* Read Local Supported Features. Not all AMP controllers
419 * support this so it's placed conditionally in the second
420 * stage init.
421 */
422 if (req->hdev->commands[14] & 0x20)
423 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
424 }
425
426 static void hci_init1_req(struct hci_request *req, unsigned long opt)
427 {
428 struct hci_dev *hdev = req->hdev;
429
430 BT_DBG("%s %ld", hdev->name, opt);
431
432 /* Reset */
433 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
434 hci_reset_req(req, 0);
435
436 switch (hdev->dev_type) {
437 case HCI_BREDR:
438 bredr_init(req);
439 break;
440
441 case HCI_AMP:
442 amp_init1(req);
443 break;
444
445 default:
446 BT_ERR("Unknown device type %d", hdev->dev_type);
447 break;
448 }
449 }
450
451 static void bredr_setup(struct hci_request *req)
452 {
453 __le16 param;
454 __u8 flt_type;
455
456 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
457 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
458
459 /* Read Class of Device */
460 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
461
462 /* Read Local Name */
463 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
464
465 /* Read Voice Setting */
466 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
467
468 /* Read Number of Supported IAC */
469 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
470
471 /* Read Current IAC LAP */
472 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
473
474 /* Clear Event Filters */
475 flt_type = HCI_FLT_CLEAR_ALL;
476 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
477
478 /* Connection accept timeout ~20 secs */
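/* 0x7d00 = 32000 baseband slots * 0.625 ms/slot = 20000 ms */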
479 param = cpu_to_le16(0x7d00);
480 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
481 }
482
483 static void le_setup(struct hci_request *req)
484 {
485 struct hci_dev *hdev = req->hdev;
486
487 /* Read LE Buffer Size */
488 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
489
490 /* Read LE Local Supported Features */
491 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
492
493 /* Read LE Supported States */
494 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
495
496 /* Read LE White List Size */
497 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
498
499 /* Clear LE White List */
500 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
501
502 /* LE-only controllers have LE implicitly enabled */
503 if (!lmp_bredr_capable(hdev))
504 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
505 }
506
507 static void hci_setup_event_mask(struct hci_request *req)
508 {
509 struct hci_dev *hdev = req->hdev;
510
511 /* The second byte is 0xff instead of 0x9f (two reserved bits
512 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
513 * command otherwise.
514 */
515 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
516
517 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
518 * any event mask for pre 1.2 devices.
519 */
520 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
521 return;
522
523 if (lmp_bredr_capable(hdev)) {
524 events[4] |= 0x01; /* Flow Specification Complete */
525 events[4] |= 0x02; /* Inquiry Result with RSSI */
526 events[4] |= 0x04; /* Read Remote Extended Features Complete */
527 events[5] |= 0x08; /* Synchronous Connection Complete */
528 events[5] |= 0x10; /* Synchronous Connection Changed */
529 } else {
530 /* Use a different default for LE-only devices */
531 memset(events, 0, sizeof(events));
532 events[0] |= 0x10; /* Disconnection Complete */
533 events[1] |= 0x08; /* Read Remote Version Information Complete */
534 events[1] |= 0x20; /* Command Complete */
535 events[1] |= 0x40; /* Command Status */
536 events[1] |= 0x80; /* Hardware Error */
537 events[2] |= 0x04; /* Number of Completed Packets */
538 events[3] |= 0x02; /* Data Buffer Overflow */
539
540 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
541 events[0] |= 0x80; /* Encryption Change */
542 events[5] |= 0x80; /* Encryption Key Refresh Complete */
543 }
544 }
545
546 if (lmp_inq_rssi_capable(hdev))
547 events[4] |= 0x02; /* Inquiry Result with RSSI */
548
549 if (lmp_sniffsubr_capable(hdev))
550 events[5] |= 0x20; /* Sniff Subrating */
551
552 if (lmp_pause_enc_capable(hdev))
553 events[5] |= 0x80; /* Encryption Key Refresh Complete */
554
555 if (lmp_ext_inq_capable(hdev))
556 events[5] |= 0x40; /* Extended Inquiry Result */
557
558 if (lmp_no_flush_capable(hdev))
559 events[7] |= 0x01; /* Enhanced Flush Complete */
560
561 if (lmp_lsto_capable(hdev))
562 events[6] |= 0x80; /* Link Supervision Timeout Changed */
563
564 if (lmp_ssp_capable(hdev)) {
565 events[6] |= 0x01; /* IO Capability Request */
566 events[6] |= 0x02; /* IO Capability Response */
567 events[6] |= 0x04; /* User Confirmation Request */
568 events[6] |= 0x08; /* User Passkey Request */
569 events[6] |= 0x10; /* Remote OOB Data Request */
570 events[6] |= 0x20; /* Simple Pairing Complete */
571 events[7] |= 0x04; /* User Passkey Notification */
572 events[7] |= 0x08; /* Keypress Notification */
573 events[7] |= 0x10; /* Remote Host Supported
574 * Features Notification
575 */
576 }
577
578 if (lmp_le_capable(hdev))
579 events[7] |= 0x20; /* LE Meta-Event */
580
581 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
582 }
583
584 static void hci_init2_req(struct hci_request *req, unsigned long opt)
585 {
586 struct hci_dev *hdev = req->hdev;
587
588 if (hdev->dev_type == HCI_AMP)
589 return amp_init2(req);
590
591 if (lmp_bredr_capable(hdev))
592 bredr_setup(req);
593 else
594 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
595
596 if (lmp_le_capable(hdev))
597 le_setup(req);
598
599 /* All Bluetooth 1.2 and later controllers should support the
600 * HCI command for reading the local supported commands.
601 *
602 * Unfortunately some controllers indicate Bluetooth 1.2 support,
603 * but do not have support for this command. If that is the case,
604 * the driver can quirk the behavior and skip reading the local
605 * supported commands.
606 */
607 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
608 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
609 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
610
611 if (lmp_ssp_capable(hdev)) {
612 /* When SSP is available, the host features page
613 * should also be available. However, some
614 * controllers list the max_page as 0 as long as SSP
615 * has not been enabled. To achieve proper debugging
616 * output, force the minimum max_page to 1 at least.
617 */
618 hdev->max_page = 0x01;
619
620 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
621 u8 mode = 0x01;
622
623 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
624 sizeof(mode), &mode);
625 } else {
626 struct hci_cp_write_eir cp;
627
628 memset(hdev->eir, 0, sizeof(hdev->eir));
629 memset(&cp, 0, sizeof(cp));
630
631 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
632 }
633 }
634
635 if (lmp_inq_rssi_capable(hdev) ||
636 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
637 u8 mode;
638
639 /* If Extended Inquiry Result events are supported, then
640 * they are clearly preferred over Inquiry Result with RSSI
641 * events.
642 */
643 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
644
645 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
646 }
647
648 if (lmp_inq_tx_pwr_capable(hdev))
649 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
650
651 if (lmp_ext_feat_capable(hdev)) {
652 struct hci_cp_read_local_ext_features cp;
653
654 cp.page = 0x01;
655 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
656 sizeof(cp), &cp);
657 }
658
659 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
660 u8 enable = 1;
661 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
662 &enable);
663 }
664 }
665
666 static void hci_setup_link_policy(struct hci_request *req)
667 {
668 struct hci_dev *hdev = req->hdev;
669 struct hci_cp_write_def_link_policy cp;
670 u16 link_policy = 0;
671
672 if (lmp_rswitch_capable(hdev))
673 link_policy |= HCI_LP_RSWITCH;
674 if (lmp_hold_capable(hdev))
675 link_policy |= HCI_LP_HOLD;
676 if (lmp_sniff_capable(hdev))
677 link_policy |= HCI_LP_SNIFF;
678 if (lmp_park_capable(hdev))
679 link_policy |= HCI_LP_PARK;
680
681 cp.policy = cpu_to_le16(link_policy);
682 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
683 }
684
685 static void hci_set_le_support(struct hci_request *req)
686 {
687 struct hci_dev *hdev = req->hdev;
688 struct hci_cp_write_le_host_supported cp;
689
690 /* LE-only devices do not support explicit enablement */
691 if (!lmp_bredr_capable(hdev))
692 return;
693
694 memset(&cp, 0, sizeof(cp));
695
696 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
697 cp.le = 0x01;
698 cp.simul = 0x00;
699 }
700
701 if (cp.le != lmp_host_le_capable(hdev))
702 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
703 &cp);
704 }
705
706 static void hci_set_event_mask_page_2(struct hci_request *req)
707 {
708 struct hci_dev *hdev = req->hdev;
709 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
710
711 /* If Connectionless Slave Broadcast master role is supported,
712 * enable all necessary events for it.
713 */
714 if (lmp_csb_master_capable(hdev)) {
715 events[1] |= 0x40; /* Triggered Clock Capture */
716 events[1] |= 0x80; /* Synchronization Train Complete */
717 events[2] |= 0x10; /* Slave Page Response Timeout */
718 events[2] |= 0x20; /* CSB Channel Map Change */
719 }
720
721 /* If Connectionless Slave Broadcast slave role is supported,
722 * enable all necessary events for it.
723 */
724 if (lmp_csb_slave_capable(hdev)) {
725 events[2] |= 0x01; /* Synchronization Train Received */
726 events[2] |= 0x02; /* CSB Receive */
727 events[2] |= 0x04; /* CSB Timeout */
728 events[2] |= 0x08; /* Truncated Page Complete */
729 }
730
731 /* Enable Authenticated Payload Timeout Expired event if supported */
732 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
733 events[2] |= 0x80;
734
735 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
736 }
737
738 static void hci_init3_req(struct hci_request *req, unsigned long opt)
739 {
740 struct hci_dev *hdev = req->hdev;
741 u8 p;
742
743 hci_setup_event_mask(req);
744
745 if (hdev->commands[6] & 0x20) {
746 struct hci_cp_read_stored_link_key cp;
747
748 bacpy(&cp.bdaddr, BDADDR_ANY);
749 cp.read_all = 0x01;
750 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
751 }
752
753 if (hdev->commands[5] & 0x10)
754 hci_setup_link_policy(req);
755
756 if (hdev->commands[8] & 0x01)
757 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
758
759 /* Some older Broadcom based Bluetooth 1.2 controllers do not
760 * support the Read Page Scan Type command. Check support for
761 * this command in the bit mask of supported commands.
762 */
763 if (hdev->commands[13] & 0x01)
764 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
765
766 if (lmp_le_capable(hdev)) {
767 u8 events[8];
768
769 memset(events, 0, sizeof(events));
770 events[0] = 0x0f;
771
772 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
773 events[0] |= 0x10; /* LE Long Term Key Request */
774
775 /* If controller supports the Connection Parameters Request
776 * Link Layer Procedure, enable the corresponding event.
777 */
778 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
779 events[0] |= 0x20; /* LE Remote Connection
780 * Parameter Request
781 */
782
783 /* If the controller supports the Data Length Extension
784 * feature, enable the corresponding event.
785 */
786 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
787 events[0] |= 0x40; /* LE Data Length Change */
788
789 /* If the controller supports Extended Scanner Filter
790 * Policies, enable the corresponding event.
791 */
792 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
793 events[1] |= 0x04; /* LE Direct Advertising
794 * Report
795 */
796
797 /* If the controller supports the LE Read Local P-256
798 * Public Key command, enable the corresponding event.
799 */
800 if (hdev->commands[34] & 0x02)
801 events[0] |= 0x80; /* LE Read Local P-256
802 * Public Key Complete
803 */
804
805 /* If the controller supports the LE Generate DHKey
806 * command, enable the corresponding event.
807 */
808 if (hdev->commands[34] & 0x04)
809 events[1] |= 0x01; /* LE Generate DHKey Complete */
810
811 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
812 events);
813
814 if (hdev->commands[25] & 0x40) {
815 /* Read LE Advertising Channel TX Power */
816 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
817 }
818
819 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
820 /* Read LE Maximum Data Length */
821 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
822
823 /* Read LE Suggested Default Data Length */
824 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
825 }
826
827 hci_set_le_support(req);
828 }
829
830 /* Read features beyond page 1 if available */
831 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
832 struct hci_cp_read_local_ext_features cp;
833
834 cp.page = p;
835 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
836 sizeof(cp), &cp);
837 }
838 }
839
840 static void hci_init4_req(struct hci_request *req, unsigned long opt)
841 {
842 struct hci_dev *hdev = req->hdev;
843
844 /* Some Broadcom based Bluetooth controllers do not support the
845 * Delete Stored Link Key command. They are clearly indicating its
846 * absence in the bit mask of supported commands.
847 *
848 * Check the supported commands and only if the command is marked
849 * as supported send it. If not supported, assume that the controller
850 * does not have actual support for stored link keys which makes this
851 * command redundant anyway.
852 *
853 * Some controllers indicate that they support handling deleting
854 * stored link keys, but they don't. The quirk lets a driver
855 * just disable this command.
856 */
857 if (hdev->commands[6] & 0x80 &&
858 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
859 struct hci_cp_delete_stored_link_key cp;
860
861 bacpy(&cp.bdaddr, BDADDR_ANY);
862 cp.delete_all = 0x01;
863 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
864 sizeof(cp), &cp);
865 }
866
867 /* Set event mask page 2 if the HCI command for it is supported */
868 if (hdev->commands[22] & 0x04)
869 hci_set_event_mask_page_2(req);
870
871 /* Read local codec list if the HCI command is supported */
872 if (hdev->commands[29] & 0x20)
873 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
874
875 /* Get MWS transport configuration if the HCI command is supported */
876 if (hdev->commands[30] & 0x08)
877 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
878
879 /* Check for Synchronization Train support */
880 if (lmp_sync_train_capable(hdev))
881 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
882
883 /* Enable Secure Connections if supported and configured */
884 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
885 bredr_sc_enabled(hdev)) {
886 u8 support = 0x01;
887
888 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
889 sizeof(support), &support);
890 }
891 }
892
893 static int __hci_init(struct hci_dev *hdev)
894 {
895 int err;
896
897 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
898 if (err < 0)
899 return err;
900
901 /* The Device Under Test (DUT) mode is special and available for
902 * all controller types. So just create it early on.
903 */
904 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
905 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
906 &dut_mode_fops);
907 }
908
909 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
910 if (err < 0)
911 return err;
912
913 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
914 * BR/EDR/LE controllers. AMP controllers only need the
915 * first two stages of init.
916 */
917 if (hdev->dev_type != HCI_BREDR)
918 return 0;
919
920 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
921 if (err < 0)
922 return err;
923
924 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
925 if (err < 0)
926 return err;
927
928 /* This function is only called when the controller is actually in
929 * configured state. When the controller is marked as unconfigured,
930 * this initialization procedure is not run.
931 *
932 * It means that it is possible that a controller runs through its
933 * setup phase and then discovers missing settings. If that is the
934 * case, then this function will not be called. It then will only
935 * be called during the config phase.
936 *
937 * So only when in setup phase or config phase, create the debugfs
938 * entries and register the SMP channels.
939 */
940 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
941 !hci_dev_test_flag(hdev, HCI_CONFIG))
942 return 0;
943
944 hci_debugfs_create_common(hdev);
945
946 if (lmp_bredr_capable(hdev))
947 hci_debugfs_create_bredr(hdev);
948
949 if (lmp_le_capable(hdev))
950 hci_debugfs_create_le(hdev);
951
952 return 0;
953 }
954
955 static void hci_init0_req(struct hci_request *req, unsigned long opt)
956 {
957 struct hci_dev *hdev = req->hdev;
958
959 BT_DBG("%s %ld", hdev->name, opt);
960
961 /* Reset */
962 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
963 hci_reset_req(req, 0);
964
965 /* Read Local Version */
966 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
967
968 /* Read BD Address */
969 if (hdev->set_bdaddr)
970 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
971 }
972
973 static int __hci_unconf_init(struct hci_dev *hdev)
974 {
975 int err;
976
977 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
978 return 0;
979
980 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
981 if (err < 0)
982 return err;
983
984 return 0;
985 }
986
987 static void hci_scan_req(struct hci_request *req, unsigned long opt)
988 {
989 __u8 scan = opt;
990
991 BT_DBG("%s %x", req->hdev->name, scan);
992
993 /* Inquiry and Page scans */
994 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
995 }
996
997 static void hci_auth_req(struct hci_request *req, unsigned long opt)
998 {
999 __u8 auth = opt;
1000
1001 BT_DBG("%s %x", req->hdev->name, auth);
1002
1003 /* Authentication */
1004 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1005 }
1006
1007 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1008 {
1009 __u8 encrypt = opt;
1010
1011 BT_DBG("%s %x", req->hdev->name, encrypt);
1012
1013 /* Encryption */
1014 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1015 }
1016
1017 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1018 {
1019 __le16 policy = cpu_to_le16(opt);
1020
1021 BT_DBG("%s %x", req->hdev->name, policy);
1022
1023 /* Default link policy */
1024 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1025 }
1026
1027 /* Get HCI device by index.
1028 * Device is held on return. */
1029 struct hci_dev *hci_dev_get(int index)
1030 {
1031 struct hci_dev *hdev = NULL, *d;
1032
1033 BT_DBG("%d", index);
1034
1035 if (index < 0)
1036 return NULL;
1037
1038 read_lock(&hci_dev_list_lock);
1039 list_for_each_entry(d, &hci_dev_list, list) {
1040 if (d->id == index) {
1041 hdev = hci_dev_hold(d);
1042 break;
1043 }
1044 }
1045 read_unlock(&hci_dev_list_lock);
1046 return hdev;
1047 }
1048
1049 /* ---- Inquiry support ---- */
1050
1051 bool hci_discovery_active(struct hci_dev *hdev)
1052 {
1053 struct discovery_state *discov = &hdev->discovery;
1054
1055 switch (discov->state) {
1056 case DISCOVERY_FINDING:
1057 case DISCOVERY_RESOLVING:
1058 return true;
1059
1060 default:
1061 return false;
1062 }
1063 }
1064
1065 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1066 {
1067 int old_state = hdev->discovery.state;
1068
1069 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1070
1071 if (old_state == state)
1072 return;
1073
1074 hdev->discovery.state = state;
1075
1076 switch (state) {
1077 case DISCOVERY_STOPPED:
1078 hci_update_background_scan(hdev);
1079
1080 if (old_state != DISCOVERY_STARTING)
1081 mgmt_discovering(hdev, 0);
1082 break;
1083 case DISCOVERY_STARTING:
1084 break;
1085 case DISCOVERY_FINDING:
1086 mgmt_discovering(hdev, 1);
1087 break;
1088 case DISCOVERY_RESOLVING:
1089 break;
1090 case DISCOVERY_STOPPING:
1091 break;
1092 }
1093 }
1094
1095 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1096 {
1097 struct discovery_state *cache = &hdev->discovery;
1098 struct inquiry_entry *p, *n;
1099
1100 list_for_each_entry_safe(p, n, &cache->all, all) {
1101 list_del(&p->all);
1102 kfree(p);
1103 }
1104
1105 INIT_LIST_HEAD(&cache->unknown);
1106 INIT_LIST_HEAD(&cache->resolve);
1107 }
1108
1109 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1110 bdaddr_t *bdaddr)
1111 {
1112 struct discovery_state *cache = &hdev->discovery;
1113 struct inquiry_entry *e;
1114
1115 BT_DBG("cache %p, %pMR", cache, bdaddr);
1116
1117 list_for_each_entry(e, &cache->all, all) {
1118 if (!bacmp(&e->data.bdaddr, bdaddr))
1119 return e;
1120 }
1121
1122 return NULL;
1123 }
1124
1125 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1126 bdaddr_t *bdaddr)
1127 {
1128 struct discovery_state *cache = &hdev->discovery;
1129 struct inquiry_entry *e;
1130
1131 BT_DBG("cache %p, %pMR", cache, bdaddr);
1132
1133 list_for_each_entry(e, &cache->unknown, list) {
1134 if (!bacmp(&e->data.bdaddr, bdaddr))
1135 return e;
1136 }
1137
1138 return NULL;
1139 }
1140
1141 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr,
1143 int state)
1144 {
1145 struct discovery_state *cache = &hdev->discovery;
1146 struct inquiry_entry *e;
1147
1148 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1149
1150 list_for_each_entry(e, &cache->resolve, list) {
1151 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1152 return e;
1153 if (!bacmp(&e->data.bdaddr, bdaddr))
1154 return e;
1155 }
1156
1157 return NULL;
1158 }
1159
1160 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1161 struct inquiry_entry *ie)
1162 {
1163 struct discovery_state *cache = &hdev->discovery;
1164 struct list_head *pos = &cache->resolve;
1165 struct inquiry_entry *p;
1166
1167 list_del(&ie->list);
1168
1169 list_for_each_entry(p, &cache->resolve, list) {
1170 if (p->name_state != NAME_PENDING &&
1171 abs(p->data.rssi) >= abs(ie->data.rssi))
1172 break;
1173 pos = &p->list;
1174 }
1175
1176 list_add(&ie->list, pos);
1177 }
1178
1179 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1180 bool name_known)
1181 {
1182 struct discovery_state *cache = &hdev->discovery;
1183 struct inquiry_entry *ie;
1184 u32 flags = 0;
1185
1186 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1187
1188 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1189
1190 if (!data->ssp_mode)
1191 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1192
1193 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1194 if (ie) {
1195 if (!ie->data.ssp_mode)
1196 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1197
1198 if (ie->name_state == NAME_NEEDED &&
1199 data->rssi != ie->data.rssi) {
1200 ie->data.rssi = data->rssi;
1201 hci_inquiry_cache_update_resolve(hdev, ie);
1202 }
1203
1204 goto update;
1205 }
1206
1207 /* Entry not in the cache. Add new one. */
1208 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1209 if (!ie) {
1210 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 goto done;
1212 }
1213
1214 list_add(&ie->all, &cache->all);
1215
1216 if (name_known) {
1217 ie->name_state = NAME_KNOWN;
1218 } else {
1219 ie->name_state = NAME_NOT_KNOWN;
1220 list_add(&ie->list, &cache->unknown);
1221 }
1222
1223 update:
1224 if (name_known && ie->name_state != NAME_KNOWN &&
1225 ie->name_state != NAME_PENDING) {
1226 ie->name_state = NAME_KNOWN;
1227 list_del(&ie->list);
1228 }
1229
1230 memcpy(&ie->data, data, sizeof(*data));
1231 ie->timestamp = jiffies;
1232 cache->timestamp = jiffies;
1233
1234 if (ie->name_state == NAME_NOT_KNOWN)
1235 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1236
1237 done:
1238 return flags;
1239 }
1240
1241 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1242 {
1243 struct discovery_state *cache = &hdev->discovery;
1244 struct inquiry_info *info = (struct inquiry_info *) buf;
1245 struct inquiry_entry *e;
1246 int copied = 0;
1247
1248 list_for_each_entry(e, &cache->all, all) {
1249 struct inquiry_data *data = &e->data;
1250
1251 if (copied >= num)
1252 break;
1253
1254 bacpy(&info->bdaddr, &data->bdaddr);
1255 info->pscan_rep_mode = data->pscan_rep_mode;
1256 info->pscan_period_mode = data->pscan_period_mode;
1257 info->pscan_mode = data->pscan_mode;
1258 memcpy(info->dev_class, data->dev_class, 3);
1259 info->clock_offset = data->clock_offset;
1260
1261 info++;
1262 copied++;
1263 }
1264
1265 BT_DBG("cache %p, copied %d", cache, copied);
1266 return copied;
1267 }
1268
1269 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1270 {
1271 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1272 struct hci_dev *hdev = req->hdev;
1273 struct hci_cp_inquiry cp;
1274
1275 BT_DBG("%s", hdev->name);
1276
1277 if (test_bit(HCI_INQUIRY, &hdev->flags))
1278 return;
1279
1280 /* Start Inquiry */
1281 memcpy(&cp.lap, &ir->lap, 3);
1282 cp.length = ir->length;
1283 cp.num_rsp = ir->num_rsp;
1284 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1285 }
1286
1287 int hci_inquiry(void __user *arg)
1288 {
1289 __u8 __user *ptr = arg;
1290 struct hci_inquiry_req ir;
1291 struct hci_dev *hdev;
1292 int err = 0, do_inquiry = 0, max_rsp;
1293 long timeo;
1294 __u8 *buf;
1295
1296 if (copy_from_user(&ir, ptr, sizeof(ir)))
1297 return -EFAULT;
1298
1299 hdev = hci_dev_get(ir.dev_id);
1300 if (!hdev)
1301 return -ENODEV;
1302
1303 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1304 err = -EBUSY;
1305 goto done;
1306 }
1307
1308 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1309 err = -EOPNOTSUPP;
1310 goto done;
1311 }
1312
1313 if (hdev->dev_type != HCI_BREDR) {
1314 err = -EOPNOTSUPP;
1315 goto done;
1316 }
1317
1318 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1319 err = -EOPNOTSUPP;
1320 goto done;
1321 }
1322
1323 hci_dev_lock(hdev);
1324 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1325 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1326 hci_inquiry_cache_flush(hdev);
1327 do_inquiry = 1;
1328 }
1329 hci_dev_unlock(hdev);
1330
1331 timeo = ir.length * msecs_to_jiffies(2000);
1332
1333 if (do_inquiry) {
1334 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1335 timeo);
1336 if (err < 0)
1337 goto done;
1338
1339 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1340 * cleared). If it is interrupted by a signal, return -EINTR.
1341 */
1342 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1343 TASK_INTERRUPTIBLE))
1344 return -EINTR;
1345 }
1346
1347 /* For an unlimited number of responses we will use a buffer with
1348 * 255 entries
1349 */
1350 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1351
1352 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1353 * then copy it to user space.
1354 */
1355 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1356 if (!buf) {
1357 err = -ENOMEM;
1358 goto done;
1359 }
1360
1361 hci_dev_lock(hdev);
1362 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1363 hci_dev_unlock(hdev);
1364
1365 BT_DBG("num_rsp %d", ir.num_rsp);
1366
1367 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1368 ptr += sizeof(ir);
1369 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1370 ir.num_rsp))
1371 err = -EFAULT;
1372 } else
1373 err = -EFAULT;
1374
1375 kfree(buf);
1376
1377 done:
1378 hci_dev_put(hdev);
1379 return err;
1380 }
1381
1382 static int hci_dev_do_open(struct hci_dev *hdev)
1383 {
1384 int ret = 0;
1385
1386 BT_DBG("%s %p", hdev->name, hdev);
1387
1388 hci_req_lock(hdev);
1389
1390 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1391 ret = -ENODEV;
1392 goto done;
1393 }
1394
1395 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1396 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1397 /* Check for rfkill but allow the HCI setup stage to
1398 * proceed (which in itself doesn't cause any RF activity).
1399 */
1400 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1401 ret = -ERFKILL;
1402 goto done;
1403 }
1404
1405 /* Check for valid public address or a configured static
1406 * random address, but let the HCI setup proceed to
1407 * be able to determine if there is a public address
1408 * or not.
1409 *
1410 * In case of user channel usage, it is not important
1411 * if a public address or static random address is
1412 * available.
1413 *
1414 * This check is only valid for BR/EDR controllers
1415 * since AMP controllers do not have an address.
1416 */
1417 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1418 hdev->dev_type == HCI_BREDR &&
1419 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1420 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1421 ret = -EADDRNOTAVAIL;
1422 goto done;
1423 }
1424 }
1425
1426 if (test_bit(HCI_UP, &hdev->flags)) {
1427 ret = -EALREADY;
1428 goto done;
1429 }
1430
1431 if (hdev->open(hdev)) {
1432 ret = -EIO;
1433 goto done;
1434 }
1435
1436 atomic_set(&hdev->cmd_cnt, 1);
1437 set_bit(HCI_INIT, &hdev->flags);
1438
1439 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1440 if (hdev->setup)
1441 ret = hdev->setup(hdev);
1442
1443 /* The transport driver can set these quirks before
1444 * creating the HCI device or in its setup callback.
1445 *
1446 * In case any of them is set, the controller has to
1447 * start up as unconfigured.
1448 */
1449 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1450 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1451 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1452
1453 /* For an unconfigured controller it is required to
1454 * read at least the version information provided by
1455 * the Read Local Version Information command.
1456 *
1457 * If the set_bdaddr driver callback is provided, then
1458 * also the original Bluetooth public device address
1459 * will be read using the Read BD Address command.
1460 */
1461 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1462 ret = __hci_unconf_init(hdev);
1463 }
1464
1465 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1466 /* If public address change is configured, ensure that
1467 * the address gets programmed. If the driver does not
1468 * support changing the public address, fail the power
1469 * on procedure.
1470 */
1471 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1472 hdev->set_bdaddr)
1473 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1474 else
1475 ret = -EADDRNOTAVAIL;
1476 }
1477
1478 if (!ret) {
1479 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1480 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1481 ret = __hci_init(hdev);
1482 }
1483
1484 clear_bit(HCI_INIT, &hdev->flags);
1485
1486 if (!ret) {
1487 hci_dev_hold(hdev);
1488 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1489 set_bit(HCI_UP, &hdev->flags);
1490 hci_notify(hdev, HCI_DEV_UP);
1491 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1492 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1493 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1494 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1495 hdev->dev_type == HCI_BREDR) {
1496 hci_dev_lock(hdev);
1497 mgmt_powered(hdev, 1);
1498 hci_dev_unlock(hdev);
1499 }
1500 } else {
1501 /* Init failed, cleanup */
1502 flush_work(&hdev->tx_work);
1503 flush_work(&hdev->cmd_work);
1504 flush_work(&hdev->rx_work);
1505
1506 skb_queue_purge(&hdev->cmd_q);
1507 skb_queue_purge(&hdev->rx_q);
1508
1509 if (hdev->flush)
1510 hdev->flush(hdev);
1511
1512 if (hdev->sent_cmd) {
1513 kfree_skb(hdev->sent_cmd);
1514 hdev->sent_cmd = NULL;
1515 }
1516
1517 hdev->close(hdev);
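/* Keep only the HCI_RAW bit; all other runtime flags are cleared */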
1518 hdev->flags &= BIT(HCI_RAW);
1519 }
1520
1521 done:
1522 hci_req_unlock(hdev);
1523 return ret;
1524 }
1525
1526 /* ---- HCI ioctl helpers ---- */
1527
1528 int hci_dev_open(__u16 dev)
1529 {
1530 struct hci_dev *hdev;
1531 int err;
1532
1533 hdev = hci_dev_get(dev);
1534 if (!hdev)
1535 return -ENODEV;
1536
1537 /* Devices that are marked as unconfigured can only be powered
1538 * up as user channel. Trying to bring them up as normal devices
1539 * will result in a failure. Only user channel operation is
1540 * possible.
1541 *
1542 * When this function is called for a user channel, the flag
1543 * HCI_USER_CHANNEL will be set first before attempting to
1544 * open the device.
1545 */
1546 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1547 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1548 err = -EOPNOTSUPP;
1549 goto done;
1550 }
1551
1552 /* We need to ensure that no other power on/off work is pending
1553 * before proceeding to call hci_dev_do_open. This is
1554 * particularly important if the setup procedure has not yet
1555 * completed.
1556 */
1557 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1558 cancel_delayed_work(&hdev->power_off);
1559
1560 /* After this call it is guaranteed that the setup procedure
1561 * has finished. This means that error conditions like RFKILL
1562 * or no valid public or static random address apply.
1563 */
1564 flush_workqueue(hdev->req_workqueue);
1565
1566 /* For controllers not using the management interface and that
1567 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1568 * so that pairing works for them. Once the management interface
1569 * is in use this bit will be cleared again and userspace has
1570 * to explicitly enable it.
1571 */
1572 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1573 !hci_dev_test_flag(hdev, HCI_MGMT))
1574 hci_dev_set_flag(hdev, HCI_BONDABLE);
1575
1576 err = hci_dev_do_open(hdev);
1577
1578 done:
1579 hci_dev_put(hdev);
1580 return err;
1581 }
1582
1583 /* This function requires the caller holds hdev->lock */
1584 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1585 {
1586 struct hci_conn_params *p;
1587
1588 list_for_each_entry(p, &hdev->le_conn_params, list) {
1589 if (p->conn) {
1590 hci_conn_drop(p->conn);
1591 hci_conn_put(p->conn);
1592 p->conn = NULL;
1593 }
1594 list_del_init(&p->action);
1595 }
1596
1597 BT_DBG("All LE pending actions cleared");
1598 }
1599
1600 static int hci_dev_do_close(struct hci_dev *hdev)
1601 {
1602 BT_DBG("%s %p", hdev->name, hdev);
1603
1604 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1605 /* Execute vendor specific shutdown routine */
1606 if (hdev->shutdown)
1607 hdev->shutdown(hdev);
1608 }
1609
1610 cancel_delayed_work(&hdev->power_off);
1611
1612 hci_req_cancel(hdev, ENODEV);
1613 hci_req_lock(hdev);
1614
1615 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1616 cancel_delayed_work_sync(&hdev->cmd_timer);
1617 hci_req_unlock(hdev);
1618 return 0;
1619 }
1620
1621 /* Flush RX and TX works */
1622 flush_work(&hdev->tx_work);
1623 flush_work(&hdev->rx_work);
1624
1625 if (hdev->discov_timeout > 0) {
1626 cancel_delayed_work(&hdev->discov_off);
1627 hdev->discov_timeout = 0;
1628 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1629 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1630 }
1631
1632 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1633 cancel_delayed_work(&hdev->service_cache);
1634
1635 cancel_delayed_work_sync(&hdev->le_scan_disable);
1636 cancel_delayed_work_sync(&hdev->le_scan_restart);
1637
1638 if (hci_dev_test_flag(hdev, HCI_MGMT))
1639 cancel_delayed_work_sync(&hdev->rpa_expired);
1640
1641 /* Avoid potential lockdep warnings from the *_flush() calls by
1642 * ensuring the workqueue is empty up front.
1643 */
1644 drain_workqueue(hdev->workqueue);
1645
1646 hci_dev_lock(hdev);
1647
1648 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1649
1650 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1651 if (hdev->dev_type == HCI_BREDR)
1652 mgmt_powered(hdev, 0);
1653 }
1654
1655 hci_inquiry_cache_flush(hdev);
1656 hci_pend_le_actions_clear(hdev);
1657 hci_conn_hash_flush(hdev);
1658 hci_dev_unlock(hdev);
1659
1660 smp_unregister(hdev);
1661
1662 hci_notify(hdev, HCI_DEV_DOWN);
1663
1664 if (hdev->flush)
1665 hdev->flush(hdev);
1666
1667 /* Reset device */
1668 skb_queue_purge(&hdev->cmd_q);
1669 atomic_set(&hdev->cmd_cnt, 1);
1670 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1671 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1672 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1673 set_bit(HCI_INIT, &hdev->flags);
1674 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1675 clear_bit(HCI_INIT, &hdev->flags);
1676 }
1677
1678 /* flush cmd work */
1679 flush_work(&hdev->cmd_work);
1680
1681 /* Drop queues */
1682 skb_queue_purge(&hdev->rx_q);
1683 skb_queue_purge(&hdev->cmd_q);
1684 skb_queue_purge(&hdev->raw_q);
1685
1686 /* Drop last sent command */
1687 if (hdev->sent_cmd) {
1688 cancel_delayed_work_sync(&hdev->cmd_timer);
1689 kfree_skb(hdev->sent_cmd);
1690 hdev->sent_cmd = NULL;
1691 }
1692
1693 kfree_skb(hdev->recv_evt);
1694 hdev->recv_evt = NULL;
1695
1696 /* After this point our queues are empty
1697 * and no tasks are scheduled. */
1698 hdev->close(hdev);
1699
1700 /* Clear flags */
1701 hdev->flags &= BIT(HCI_RAW);
1702 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1703
1704 /* Controller radio is available but is currently powered down */
1705 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1706
1707 memset(hdev->eir, 0, sizeof(hdev->eir));
1708 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1709 bacpy(&hdev->random_addr, BDADDR_ANY);
1710
1711 hci_req_unlock(hdev);
1712
1713 hci_dev_put(hdev);
1714 return 0;
1715 }
1716
1717 int hci_dev_close(__u16 dev)
1718 {
1719 struct hci_dev *hdev;
1720 int err;
1721
1722 hdev = hci_dev_get(dev);
1723 if (!hdev)
1724 return -ENODEV;
1725
1726 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1727 err = -EBUSY;
1728 goto done;
1729 }
1730
1731 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1732 cancel_delayed_work(&hdev->power_off);
1733
1734 err = hci_dev_do_close(hdev);
1735
1736 done:
1737 hci_dev_put(hdev);
1738 return err;
1739 }
1740
1741 static int hci_dev_do_reset(struct hci_dev *hdev)
1742 {
1743 int ret;
1744
1745 BT_DBG("%s %p", hdev->name, hdev);
1746
1747 hci_req_lock(hdev);
1748
1749 /* Drop queues */
1750 skb_queue_purge(&hdev->rx_q);
1751 skb_queue_purge(&hdev->cmd_q);
1752
1753 /* Avoid potential lockdep warnings from the *_flush() calls by
1754 * ensuring the workqueue is empty up front.
1755 */
1756 drain_workqueue(hdev->workqueue);
1757
1758 hci_dev_lock(hdev);
1759 hci_inquiry_cache_flush(hdev);
1760 hci_conn_hash_flush(hdev);
1761 hci_dev_unlock(hdev);
1762
1763 if (hdev->flush)
1764 hdev->flush(hdev);
1765
1766 atomic_set(&hdev->cmd_cnt, 1);
1767 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1768
1769 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1770
1771 hci_req_unlock(hdev);
1772 return ret;
1773 }
1774
1775 int hci_dev_reset(__u16 dev)
1776 {
1777 struct hci_dev *hdev;
1778 int err;
1779
1780 hdev = hci_dev_get(dev);
1781 if (!hdev)
1782 return -ENODEV;
1783
1784 if (!test_bit(HCI_UP, &hdev->flags)) {
1785 err = -ENETDOWN;
1786 goto done;
1787 }
1788
1789 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1790 err = -EBUSY;
1791 goto done;
1792 }
1793
1794 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1795 err = -EOPNOTSUPP;
1796 goto done;
1797 }
1798
1799 err = hci_dev_do_reset(hdev);
1800
1801 done:
1802 hci_dev_put(hdev);
1803 return err;
1804 }
1805
1806 int hci_dev_reset_stat(__u16 dev)
1807 {
1808 struct hci_dev *hdev;
1809 int ret = 0;
1810
1811 hdev = hci_dev_get(dev);
1812 if (!hdev)
1813 return -ENODEV;
1814
1815 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1816 ret = -EBUSY;
1817 goto done;
1818 }
1819
1820 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1821 ret = -EOPNOTSUPP;
1822 goto done;
1823 }
1824
1825 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1826
1827 done:
1828 hci_dev_put(hdev);
1829 return ret;
1830 }
1831
1832 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1833 {
1834 bool conn_changed, discov_changed;
1835
1836 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1837
1838 if ((scan & SCAN_PAGE))
1839 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1840 &hdev->dev_flags);
1841 else
1842 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1843 &hdev->dev_flags);
1844
1845 if ((scan & SCAN_INQUIRY)) {
1846 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1847 &hdev->dev_flags);
1848 } else {
1849 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1850 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1851 &hdev->dev_flags);
1852 }
1853
1854 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1855 return;
1856
1857 if (conn_changed || discov_changed) {
1858 /* In case this was disabled through mgmt */
1859 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1860
1861 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1862 mgmt_update_adv_data(hdev);
1863
1864 mgmt_new_settings(hdev);
1865 }
1866 }
1867
1868 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1869 {
1870 struct hci_dev *hdev;
1871 struct hci_dev_req dr;
1872 int err = 0;
1873
1874 if (copy_from_user(&dr, arg, sizeof(dr)))
1875 return -EFAULT;
1876
1877 hdev = hci_dev_get(dr.dev_id);
1878 if (!hdev)
1879 return -ENODEV;
1880
1881 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1882 err = -EBUSY;
1883 goto done;
1884 }
1885
1886 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1887 err = -EOPNOTSUPP;
1888 goto done;
1889 }
1890
1891 if (hdev->dev_type != HCI_BREDR) {
1892 err = -EOPNOTSUPP;
1893 goto done;
1894 }
1895
1896 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1897 err = -EOPNOTSUPP;
1898 goto done;
1899 }
1900
1901 switch (cmd) {
1902 case HCISETAUTH:
1903 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1904 HCI_INIT_TIMEOUT);
1905 break;
1906
1907 case HCISETENCRYPT:
1908 if (!lmp_encrypt_capable(hdev)) {
1909 err = -EOPNOTSUPP;
1910 break;
1911 }
1912
1913 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1914 /* Auth must be enabled first */
1915 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1916 HCI_INIT_TIMEOUT);
1917 if (err)
1918 break;
1919 }
1920
1921 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1922 HCI_INIT_TIMEOUT);
1923 break;
1924
1925 case HCISETSCAN:
1926 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1927 HCI_INIT_TIMEOUT);
1928
1929 /* Ensure that the connectable and discoverable states
1930 * get correctly modified as this was a non-mgmt change.
1931 */
1932 if (!err)
1933 hci_update_scan_state(hdev, dr.dev_opt);
1934 break;
1935
1936 case HCISETLINKPOL:
1937 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1938 HCI_INIT_TIMEOUT);
1939 break;
1940
1941 case HCISETLINKMODE:
1942 hdev->link_mode = ((__u16) dr.dev_opt) &
1943 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1944 break;
1945
1946 case HCISETPTYPE:
1947 hdev->pkt_type = (__u16) dr.dev_opt;
1948 break;
1949
1950 case HCISETACLMTU:
1951 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1952 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1953 break;
1954
1955 case HCISETSCOMTU:
1956 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1957 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1958 break;
1959
1960 default:
1961 err = -EINVAL;
1962 break;
1963 }
1964
1965 done:
1966 hci_dev_put(hdev);
1967 return err;
1968 }
1969
1970 int hci_get_dev_list(void __user *arg)
1971 {
1972 struct hci_dev *hdev;
1973 struct hci_dev_list_req *dl;
1974 struct hci_dev_req *dr;
1975 int n = 0, size, err;
1976 __u16 dev_num;
1977
1978 if (get_user(dev_num, (__u16 __user *) arg))
1979 return -EFAULT;
1980
1981 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1982 return -EINVAL;
1983
1984 size = sizeof(*dl) + dev_num * sizeof(*dr);
1985
1986 dl = kzalloc(size, GFP_KERNEL);
1987 if (!dl)
1988 return -ENOMEM;
1989
1990 dr = dl->dev_req;
1991
1992 read_lock(&hci_dev_list_lock);
1993 list_for_each_entry(hdev, &hci_dev_list, list) {
1994 unsigned long flags = hdev->flags;
1995
1996 /* When the auto-off is configured it means the transport
1997 * is running, but in that case still indicate that the
1998 * device is actually down.
1999 */
2000 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001 flags &= ~BIT(HCI_UP);
2002
2003 (dr + n)->dev_id = hdev->id;
2004 (dr + n)->dev_opt = flags;
2005
2006 if (++n >= dev_num)
2007 break;
2008 }
2009 read_unlock(&hci_dev_list_lock);
2010
2011 dl->dev_num = n;
2012 size = sizeof(*dl) + n * sizeof(*dr);
2013
2014 err = copy_to_user(arg, dl, size);
2015 kfree(dl);
2016
2017 return err ? -EFAULT : 0;
2018 }
2019
2020 int hci_get_dev_info(void __user *arg)
2021 {
2022 struct hci_dev *hdev;
2023 struct hci_dev_info di;
2024 unsigned long flags;
2025 int err = 0;
2026
2027 if (copy_from_user(&di, arg, sizeof(di)))
2028 return -EFAULT;
2029
2030 hdev = hci_dev_get(di.dev_id);
2031 if (!hdev)
2032 return -ENODEV;
2033
2034 /* When the auto-off is configured it means the transport
2035 * is running, but in that case still indicate that the
2036 * device is actually down.
2037 */
2038 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2039 flags = hdev->flags & ~BIT(HCI_UP);
2040 else
2041 flags = hdev->flags;
2042
2043 strcpy(di.name, hdev->name);
2044 di.bdaddr = hdev->bdaddr;
2045 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2046 di.flags = flags;
2047 di.pkt_type = hdev->pkt_type;
2048 if (lmp_bredr_capable(hdev)) {
2049 di.acl_mtu = hdev->acl_mtu;
2050 di.acl_pkts = hdev->acl_pkts;
2051 di.sco_mtu = hdev->sco_mtu;
2052 di.sco_pkts = hdev->sco_pkts;
2053 } else {
2054 di.acl_mtu = hdev->le_mtu;
2055 di.acl_pkts = hdev->le_pkts;
2056 di.sco_mtu = 0;
2057 di.sco_pkts = 0;
2058 }
2059 di.link_policy = hdev->link_policy;
2060 di.link_mode = hdev->link_mode;
2061
2062 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2063 memcpy(&di.features, &hdev->features, sizeof(di.features));
2064
2065 if (copy_to_user(arg, &di, sizeof(di)))
2066 err = -EFAULT;
2067
2068 hci_dev_put(hdev);
2069
2070 return err;
2071 }
2072
2073 /* ---- Interface to HCI drivers ---- */
2074
2075 static int hci_rfkill_set_block(void *data, bool blocked)
2076 {
2077 struct hci_dev *hdev = data;
2078
2079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2080
2081 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2082 return -EBUSY;
2083
2084 if (blocked) {
2085 hci_dev_set_flag(hdev, HCI_RFKILLED);
2086 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2087 !hci_dev_test_flag(hdev, HCI_CONFIG))
2088 hci_dev_do_close(hdev);
2089 } else {
2090 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2091 }
2092
2093 return 0;
2094 }
2095
2096 static const struct rfkill_ops hci_rfkill_ops = {
2097 .set_block = hci_rfkill_set_block,
2098 };
2099
2100 static void hci_power_on(struct work_struct *work)
2101 {
2102 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2103 int err;
2104
2105 BT_DBG("%s", hdev->name);
2106
2107 err = hci_dev_do_open(hdev);
2108 if (err < 0) {
2109 hci_dev_lock(hdev);
2110 mgmt_set_powered_failed(hdev, err);
2111 hci_dev_unlock(hdev);
2112 return;
2113 }
2114
2115 /* During the HCI setup phase, a few error conditions are
2116 * ignored and they need to be checked now. If they are still
2117 * valid, it is important to turn the device back off.
2118 */
2119 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2120 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2121 (hdev->dev_type == HCI_BREDR &&
2122 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2123 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2124 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2125 hci_dev_do_close(hdev);
2126 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2127 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2128 HCI_AUTO_OFF_TIMEOUT);
2129 }
2130
2131 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2132 /* For unconfigured devices, set the HCI_RAW flag
2133 * so that userspace can easily identify them.
2134 */
2135 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2136 set_bit(HCI_RAW, &hdev->flags);
2137
2138 /* For fully configured devices, this will send
2139 * the Index Added event. For unconfigured devices,
2140 		 * it will send the Unconfigured Index Added event.
2141 		 *
2142 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2143 		 * and no event will be sent.
2144 */
2145 mgmt_index_added(hdev);
2146 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2147 		/* Once the controller is configured, it is important
2148 		 * to clear the HCI_RAW flag.
2149 */
2150 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2151 clear_bit(HCI_RAW, &hdev->flags);
2152
2153 /* Powering on the controller with HCI_CONFIG set only
2154 * happens with the transition from unconfigured to
2155 * configured. This will send the Index Added event.
2156 */
2157 mgmt_index_added(hdev);
2158 }
2159 }
2160
2161 static void hci_power_off(struct work_struct *work)
2162 {
2163 struct hci_dev *hdev = container_of(work, struct hci_dev,
2164 power_off.work);
2165
2166 BT_DBG("%s", hdev->name);
2167
2168 hci_dev_do_close(hdev);
2169 }
2170
2171 static void hci_error_reset(struct work_struct *work)
2172 {
2173 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2174
2175 BT_DBG("%s", hdev->name);
2176
2177 if (hdev->hw_error)
2178 hdev->hw_error(hdev, hdev->hw_error_code);
2179 else
2180 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2181 hdev->hw_error_code);
2182
2183 if (hci_dev_do_close(hdev))
2184 return;
2185
2186 hci_dev_do_open(hdev);
2187 }
2188
2189 static void hci_discov_off(struct work_struct *work)
2190 {
2191 struct hci_dev *hdev;
2192
2193 hdev = container_of(work, struct hci_dev, discov_off.work);
2194
2195 BT_DBG("%s", hdev->name);
2196
2197 mgmt_discoverable_timeout(hdev);
2198 }
2199
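/* Remove and free all service UUIDs registered for this controller. */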
2200 void hci_uuids_clear(struct hci_dev *hdev)
2201 {
2202 struct bt_uuid *uuid, *tmp;
2203
2204 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2205 list_del(&uuid->list);
2206 kfree(uuid);
2207 }
2208 }
2209
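/* Remove and free all stored BR/EDR link keys. */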
2210 void hci_link_keys_clear(struct hci_dev *hdev)
2211 {
2212 struct link_key *key;
2213
2214 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2215 list_del_rcu(&key->list);
2216 kfree_rcu(key, rcu);
2217 }
2218 }
2219
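/* Remove and free all stored SMP long term keys. */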
2220 void hci_smp_ltks_clear(struct hci_dev *hdev)
2221 {
2222 struct smp_ltk *k;
2223
2224 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2225 list_del_rcu(&k->list);
2226 kfree_rcu(k, rcu);
2227 }
2228 }
2229
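/* Remove and free all stored identity resolving keys. */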
2230 void hci_smp_irks_clear(struct hci_dev *hdev)
2231 {
2232 struct smp_irk *k;
2233
2234 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2235 list_del_rcu(&k->list);
2236 kfree_rcu(k, rcu);
2237 }
2238 }
2239
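/* Look up the stored link key for the given remote address. Returns
 * NULL when no matching key exists.
 */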
2240 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2241 {
2242 struct link_key *k;
2243
2244 rcu_read_lock();
2245 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2246 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2247 rcu_read_unlock();
2248 return k;
2249 }
2250 }
2251 rcu_read_unlock();
2252
2253 return NULL;
2254 }
2255
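/* Decide whether a link key should be stored persistently, based on
 * the key type and on the bonding requirements of both sides.
 */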
2256 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2257 u8 key_type, u8 old_key_type)
2258 {
2259 /* Legacy key */
2260 if (key_type < 0x03)
2261 return true;
2262
2263 /* Debug keys are insecure so don't store them persistently */
2264 if (key_type == HCI_LK_DEBUG_COMBINATION)
2265 return false;
2266
2267 /* Changed combination key and there's no previous one */
2268 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2269 return false;
2270
2271 /* Security mode 3 case */
2272 if (!conn)
2273 return true;
2274
2275 /* BR/EDR key derived using SC from an LE link */
2276 if (conn->type == LE_LINK)
2277 return true;
2278
2279 	/* Neither local nor remote side had no-bonding as a requirement */
2280 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2281 return true;
2282
2283 /* Local side had dedicated bonding as requirement */
2284 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2285 return true;
2286
2287 /* Remote side had dedicated bonding as requirement */
2288 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2289 return true;
2290
2291 /* If none of the above criteria match, then don't store the key
2292 * persistently */
2293 return false;
2294 }
2295
2296 static u8 ltk_role(u8 type)
2297 {
2298 if (type == SMP_LTK)
2299 return HCI_ROLE_MASTER;
2300
2301 return HCI_ROLE_SLAVE;
2302 }
2303
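/* Look up a long term key matching the given address, address type
 * and role. Keys generated with Secure Connections match either role.
 */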
2304 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2305 u8 addr_type, u8 role)
2306 {
2307 struct smp_ltk *k;
2308
2309 rcu_read_lock();
2310 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2311 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2312 continue;
2313
2314 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2315 rcu_read_unlock();
2316 return k;
2317 }
2318 }
2319 rcu_read_unlock();
2320
2321 return NULL;
2322 }
2323
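/* Find the IRK matching a resolvable private address. The cached RPA
 * of each IRK is checked first; if none matches, the RPA is resolved
 * against every stored IRK value and the result is cached.
 */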
2324 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2325 {
2326 struct smp_irk *irk;
2327
2328 rcu_read_lock();
2329 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2330 if (!bacmp(&irk->rpa, rpa)) {
2331 rcu_read_unlock();
2332 return irk;
2333 }
2334 }
2335
2336 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2337 if (smp_irk_matches(hdev, irk->val, rpa)) {
2338 bacpy(&irk->rpa, rpa);
2339 rcu_read_unlock();
2340 return irk;
2341 }
2342 }
2343 rcu_read_unlock();
2344
2345 return NULL;
2346 }
2347
2348 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2349 u8 addr_type)
2350 {
2351 struct smp_irk *irk;
2352
2353 /* Identity Address must be public or static random */
2354 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2355 return NULL;
2356
2357 rcu_read_lock();
2358 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2359 if (addr_type == irk->addr_type &&
2360 bacmp(bdaddr, &irk->bdaddr) == 0) {
2361 rcu_read_unlock();
2362 return irk;
2363 }
2364 }
2365 rcu_read_unlock();
2366
2367 return NULL;
2368 }
2369
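/* Store a new link key for the given remote address, or update the
 * existing entry. When requested, report back whether the key should
 * be stored persistently.
 */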
2370 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2371 bdaddr_t *bdaddr, u8 *val, u8 type,
2372 u8 pin_len, bool *persistent)
2373 {
2374 struct link_key *key, *old_key;
2375 u8 old_key_type;
2376
2377 old_key = hci_find_link_key(hdev, bdaddr);
2378 if (old_key) {
2379 old_key_type = old_key->type;
2380 key = old_key;
2381 } else {
2382 old_key_type = conn ? conn->key_type : 0xff;
2383 key = kzalloc(sizeof(*key), GFP_KERNEL);
2384 if (!key)
2385 return NULL;
2386 list_add_rcu(&key->list, &hdev->link_keys);
2387 }
2388
2389 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2390
2391 /* Some buggy controller combinations generate a changed
2392 * combination key for legacy pairing even when there's no
2393 * previous key */
2394 if (type == HCI_LK_CHANGED_COMBINATION &&
2395 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2396 type = HCI_LK_COMBINATION;
2397 if (conn)
2398 conn->key_type = type;
2399 }
2400
2401 bacpy(&key->bdaddr, bdaddr);
2402 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2403 key->pin_len = pin_len;
2404
2405 if (type == HCI_LK_CHANGED_COMBINATION)
2406 key->type = old_key_type;
2407 else
2408 key->type = type;
2409
2410 if (persistent)
2411 *persistent = hci_persistent_key(hdev, conn, type,
2412 old_key_type);
2413
2414 return key;
2415 }
2416
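/* Store a new long term key, or update the existing entry matching
 * the same address, address type and role.
 */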
2417 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2418 u8 addr_type, u8 type, u8 authenticated,
2419 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2420 {
2421 struct smp_ltk *key, *old_key;
2422 u8 role = ltk_role(type);
2423
2424 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2425 if (old_key)
2426 key = old_key;
2427 else {
2428 key = kzalloc(sizeof(*key), GFP_KERNEL);
2429 if (!key)
2430 return NULL;
2431 list_add_rcu(&key->list, &hdev->long_term_keys);
2432 }
2433
2434 bacpy(&key->bdaddr, bdaddr);
2435 key->bdaddr_type = addr_type;
2436 memcpy(key->val, tk, sizeof(key->val));
2437 key->authenticated = authenticated;
2438 key->ediv = ediv;
2439 key->rand = rand;
2440 key->enc_size = enc_size;
2441 key->type = type;
2442
2443 return key;
2444 }
2445
2446 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2447 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2448 {
2449 struct smp_irk *irk;
2450
2451 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2452 if (!irk) {
2453 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2454 if (!irk)
2455 return NULL;
2456
2457 bacpy(&irk->bdaddr, bdaddr);
2458 irk->addr_type = addr_type;
2459
2460 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2461 }
2462
2463 memcpy(irk->val, val, 16);
2464 bacpy(&irk->rpa, rpa);
2465
2466 return irk;
2467 }
2468
2469 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2470 {
2471 struct link_key *key;
2472
2473 key = hci_find_link_key(hdev, bdaddr);
2474 if (!key)
2475 return -ENOENT;
2476
2477 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2478
2479 list_del_rcu(&key->list);
2480 kfree_rcu(key, rcu);
2481
2482 return 0;
2483 }
2484
2485 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2486 {
2487 struct smp_ltk *k;
2488 int removed = 0;
2489
2490 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2491 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2492 continue;
2493
2494 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2495
2496 list_del_rcu(&k->list);
2497 kfree_rcu(k, rcu);
2498 removed++;
2499 }
2500
2501 return removed ? 0 : -ENOENT;
2502 }
2503
2504 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2505 {
2506 struct smp_irk *k;
2507
2508 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2509 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2510 continue;
2511
2512 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2513
2514 list_del_rcu(&k->list);
2515 kfree_rcu(k, rcu);
2516 }
2517 }
2518
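/* Check whether a bond exists with the given address: a link key for
 * BR/EDR addresses, or a long term key (after resolving the address
 * through a stored IRK) for LE addresses.
 */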
2519 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2520 {
2521 struct smp_ltk *k;
2522 struct smp_irk *irk;
2523 u8 addr_type;
2524
2525 if (type == BDADDR_BREDR) {
2526 if (hci_find_link_key(hdev, bdaddr))
2527 return true;
2528 return false;
2529 }
2530
2531 /* Convert to HCI addr type which struct smp_ltk uses */
2532 if (type == BDADDR_LE_PUBLIC)
2533 addr_type = ADDR_LE_DEV_PUBLIC;
2534 else
2535 addr_type = ADDR_LE_DEV_RANDOM;
2536
2537 irk = hci_get_irk(hdev, bdaddr, addr_type);
2538 if (irk) {
2539 bdaddr = &irk->bdaddr;
2540 addr_type = irk->addr_type;
2541 }
2542
2543 rcu_read_lock();
2544 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2545 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2546 rcu_read_unlock();
2547 return true;
2548 }
2549 }
2550 rcu_read_unlock();
2551
2552 return false;
2553 }
2554
2555 /* HCI command timer function */
2556 static void hci_cmd_timeout(struct work_struct *work)
2557 {
2558 struct hci_dev *hdev = container_of(work, struct hci_dev,
2559 cmd_timer.work);
2560
2561 if (hdev->sent_cmd) {
2562 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2563 u16 opcode = __le16_to_cpu(sent->opcode);
2564
2565 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2566 } else {
2567 BT_ERR("%s command tx timeout", hdev->name);
2568 }
2569
2570 atomic_set(&hdev->cmd_cnt, 1);
2571 queue_work(hdev->workqueue, &hdev->cmd_work);
2572 }
2573
2574 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2575 bdaddr_t *bdaddr, u8 bdaddr_type)
2576 {
2577 struct oob_data *data;
2578
2579 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2580 if (bacmp(bdaddr, &data->bdaddr) != 0)
2581 continue;
2582 if (data->bdaddr_type != bdaddr_type)
2583 continue;
2584 return data;
2585 }
2586
2587 return NULL;
2588 }
2589
2590 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2591 u8 bdaddr_type)
2592 {
2593 struct oob_data *data;
2594
2595 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2596 if (!data)
2597 return -ENOENT;
2598
2599 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2600
2601 list_del(&data->list);
2602 kfree(data);
2603
2604 return 0;
2605 }
2606
2607 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2608 {
2609 struct oob_data *data, *n;
2610
2611 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2612 list_del(&data->list);
2613 kfree(data);
2614 }
2615 }
2616
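/* Store remote OOB data for the given address. The present field
 * encodes which values are valid: 0x01 for P-192 only, 0x02 for
 * P-256 only and 0x03 for both.
 */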
2617 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2618 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2619 u8 *hash256, u8 *rand256)
2620 {
2621 struct oob_data *data;
2622
2623 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2624 if (!data) {
2625 data = kmalloc(sizeof(*data), GFP_KERNEL);
2626 if (!data)
2627 return -ENOMEM;
2628
2629 bacpy(&data->bdaddr, bdaddr);
2630 data->bdaddr_type = bdaddr_type;
2631 list_add(&data->list, &hdev->remote_oob_data);
2632 }
2633
2634 if (hash192 && rand192) {
2635 memcpy(data->hash192, hash192, sizeof(data->hash192));
2636 memcpy(data->rand192, rand192, sizeof(data->rand192));
2637 if (hash256 && rand256)
2638 data->present = 0x03;
2639 } else {
2640 memset(data->hash192, 0, sizeof(data->hash192));
2641 memset(data->rand192, 0, sizeof(data->rand192));
2642 if (hash256 && rand256)
2643 data->present = 0x02;
2644 else
2645 data->present = 0x00;
2646 }
2647
2648 if (hash256 && rand256) {
2649 memcpy(data->hash256, hash256, sizeof(data->hash256));
2650 memcpy(data->rand256, rand256, sizeof(data->rand256));
2651 } else {
2652 memset(data->hash256, 0, sizeof(data->hash256));
2653 memset(data->rand256, 0, sizeof(data->rand256));
2654 if (hash192 && rand192)
2655 data->present = 0x01;
2656 }
2657
2658 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2659
2660 return 0;
2661 }
2662
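/* Find an entry in a bdaddr list matching both address and address type. */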
2663 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2664 bdaddr_t *bdaddr, u8 type)
2665 {
2666 struct bdaddr_list *b;
2667
2668 list_for_each_entry(b, bdaddr_list, list) {
2669 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2670 return b;
2671 }
2672
2673 return NULL;
2674 }
2675
2676 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2677 {
2678 struct list_head *p, *n;
2679
2680 list_for_each_safe(p, n, bdaddr_list) {
2681 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2682
2683 list_del(p);
2684 kfree(b);
2685 }
2686 }
2687
2688 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2689 {
2690 struct bdaddr_list *entry;
2691
2692 if (!bacmp(bdaddr, BDADDR_ANY))
2693 return -EBADF;
2694
2695 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2696 return -EEXIST;
2697
2698 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2699 if (!entry)
2700 return -ENOMEM;
2701
2702 bacpy(&entry->bdaddr, bdaddr);
2703 entry->bdaddr_type = type;
2704
2705 list_add(&entry->list, list);
2706
2707 return 0;
2708 }
2709
2710 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2711 {
2712 struct bdaddr_list *entry;
2713
2714 if (!bacmp(bdaddr, BDADDR_ANY)) {
2715 hci_bdaddr_list_clear(list);
2716 return 0;
2717 }
2718
2719 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2720 if (!entry)
2721 return -ENOENT;
2722
2723 list_del(&entry->list);
2724 kfree(entry);
2725
2726 return 0;
2727 }
2728
2729 /* This function requires the caller holds hdev->lock */
2730 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2731 bdaddr_t *addr, u8 addr_type)
2732 {
2733 struct hci_conn_params *params;
2734
2735 /* The conn params list only contains identity addresses */
2736 if (!hci_is_identity_address(addr, addr_type))
2737 return NULL;
2738
2739 list_for_each_entry(params, &hdev->le_conn_params, list) {
2740 if (bacmp(&params->addr, addr) == 0 &&
2741 params->addr_type == addr_type) {
2742 return params;
2743 }
2744 }
2745
2746 return NULL;
2747 }
2748
2749 /* This function requires the caller holds hdev->lock */
2750 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2751 bdaddr_t *addr, u8 addr_type)
2752 {
2753 struct hci_conn_params *param;
2754
2755 /* The list only contains identity addresses */
2756 if (!hci_is_identity_address(addr, addr_type))
2757 return NULL;
2758
2759 list_for_each_entry(param, list, action) {
2760 if (bacmp(&param->addr, addr) == 0 &&
2761 param->addr_type == addr_type)
2762 return param;
2763 }
2764
2765 return NULL;
2766 }
2767
2768 /* This function requires the caller holds hdev->lock */
2769 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2770 bdaddr_t *addr, u8 addr_type)
2771 {
2772 struct hci_conn_params *params;
2773
2774 if (!hci_is_identity_address(addr, addr_type))
2775 return NULL;
2776
2777 params = hci_conn_params_lookup(hdev, addr, addr_type);
2778 if (params)
2779 return params;
2780
2781 params = kzalloc(sizeof(*params), GFP_KERNEL);
2782 if (!params) {
2783 BT_ERR("Out of memory");
2784 return NULL;
2785 }
2786
2787 bacpy(&params->addr, addr);
2788 params->addr_type = addr_type;
2789
2790 list_add(&params->list, &hdev->le_conn_params);
2791 INIT_LIST_HEAD(&params->action);
2792
2793 params->conn_min_interval = hdev->le_conn_min_interval;
2794 params->conn_max_interval = hdev->le_conn_max_interval;
2795 params->conn_latency = hdev->le_conn_latency;
2796 params->supervision_timeout = hdev->le_supv_timeout;
2797 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2798
2799 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2800
2801 return params;
2802 }
2803
2804 static void hci_conn_params_free(struct hci_conn_params *params)
2805 {
2806 if (params->conn) {
2807 hci_conn_drop(params->conn);
2808 hci_conn_put(params->conn);
2809 }
2810
2811 list_del(&params->action);
2812 list_del(&params->list);
2813 kfree(params);
2814 }
2815
2816 /* This function requires the caller holds hdev->lock */
2817 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2818 {
2819 struct hci_conn_params *params;
2820
2821 params = hci_conn_params_lookup(hdev, addr, addr_type);
2822 if (!params)
2823 return;
2824
2825 hci_conn_params_free(params);
2826
2827 hci_update_background_scan(hdev);
2828
2829 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2830 }
2831
2832 /* This function requires the caller holds hdev->lock */
2833 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2834 {
2835 struct hci_conn_params *params, *tmp;
2836
2837 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2838 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2839 continue;
2840 list_del(&params->list);
2841 kfree(params);
2842 }
2843
2844 BT_DBG("All LE disabled connection parameters were removed");
2845 }
2846
2847 /* This function requires the caller holds hdev->lock */
2848 void hci_conn_params_clear_all(struct hci_dev *hdev)
2849 {
2850 struct hci_conn_params *params, *tmp;
2851
2852 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2853 hci_conn_params_free(params);
2854
2855 hci_update_background_scan(hdev);
2856
2857 BT_DBG("All LE connection parameters were removed");
2858 }
2859
2860 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2861 {
2862 if (status) {
2863 BT_ERR("Failed to start inquiry: status %d", status);
2864
2865 hci_dev_lock(hdev);
2866 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2867 hci_dev_unlock(hdev);
2868 return;
2869 }
2870 }
2871
2872 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2873 u16 opcode)
2874 {
2875 /* General inquiry access code (GIAC) */
2876 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2877 struct hci_request req;
2878 struct hci_cp_inquiry cp;
2879 int err;
2880
2881 if (status) {
2882 BT_ERR("Failed to disable LE scanning: status %d", status);
2883 return;
2884 }
2885
2886 hdev->discovery.scan_start = 0;
2887
2888 switch (hdev->discovery.type) {
2889 case DISCOV_TYPE_LE:
2890 hci_dev_lock(hdev);
2891 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2892 hci_dev_unlock(hdev);
2893 break;
2894
2895 case DISCOV_TYPE_INTERLEAVED:
2896 hci_req_init(&req, hdev);
2897
2898 memset(&cp, 0, sizeof(cp));
2899 memcpy(&cp.lap, lap, sizeof(cp.lap));
2900 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2901 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2902
2903 hci_dev_lock(hdev);
2904
2905 hci_inquiry_cache_flush(hdev);
2906
2907 err = hci_req_run(&req, inquiry_complete);
2908 if (err) {
2909 BT_ERR("Inquiry request failed: err %d", err);
2910 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2911 }
2912
2913 hci_dev_unlock(hdev);
2914 break;
2915 }
2916 }
2917
2918 static void le_scan_disable_work(struct work_struct *work)
2919 {
2920 struct hci_dev *hdev = container_of(work, struct hci_dev,
2921 le_scan_disable.work);
2922 struct hci_request req;
2923 int err;
2924
2925 BT_DBG("%s", hdev->name);
2926
2927 cancel_delayed_work_sync(&hdev->le_scan_restart);
2928
2929 hci_req_init(&req, hdev);
2930
2931 hci_req_add_le_scan_disable(&req);
2932
2933 err = hci_req_run(&req, le_scan_disable_work_complete);
2934 if (err)
2935 BT_ERR("Disable LE scanning request failed: err %d", err);
2936 }
2937
2938 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2939 u16 opcode)
2940 {
2941 unsigned long timeout, duration, scan_start, now;
2942
2943 BT_DBG("%s", hdev->name);
2944
2945 if (status) {
2946 BT_ERR("Failed to restart LE scan: status %d", status);
2947 return;
2948 }
2949
2950 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2951 !hdev->discovery.scan_start)
2952 return;
2953
2954 	/* When the scan was started, hdev->le_scan_disable was queued to
2955 	 * run 'duration' after scan_start. During the scan restart this
2956 	 * job was canceled, so queue it again with the remaining timeout
2957 	 * to make sure that the scan does not run indefinitely.
2958 	 */
2959 duration = hdev->discovery.scan_duration;
2960 scan_start = hdev->discovery.scan_start;
2961 now = jiffies;
2962 if (now - scan_start <= duration) {
2963 int elapsed;
2964
2965 if (now >= scan_start)
2966 elapsed = now - scan_start;
2967 else
2968 elapsed = ULONG_MAX - scan_start + now;
2969
2970 timeout = duration - elapsed;
2971 } else {
2972 timeout = 0;
2973 }
2974 queue_delayed_work(hdev->workqueue,
2975 &hdev->le_scan_disable, timeout);
2976 }
2977
2978 static void le_scan_restart_work(struct work_struct *work)
2979 {
2980 struct hci_dev *hdev = container_of(work, struct hci_dev,
2981 le_scan_restart.work);
2982 struct hci_request req;
2983 struct hci_cp_le_set_scan_enable cp;
2984 int err;
2985
2986 BT_DBG("%s", hdev->name);
2987
2988 /* If controller is not scanning we are done. */
2989 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2990 return;
2991
2992 hci_req_init(&req, hdev);
2993
2994 hci_req_add_le_scan_disable(&req);
2995
2996 memset(&cp, 0, sizeof(cp));
2997 cp.enable = LE_SCAN_ENABLE;
2998 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2999 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3000
3001 err = hci_req_run(&req, le_scan_restart_work_complete);
3002 if (err)
3003 BT_ERR("Restart LE scan request failed: err %d", err);
3004 }
3005
3006 /* Copy the Identity Address of the controller.
3007 *
3008 * If the controller has a public BD_ADDR, then by default use that one.
3009 * If this is a LE only controller without a public address, default to
3010 * the static random address.
3011 *
3012 * For debugging purposes it is possible to force controllers with a
3013 * public address to use the static random address instead.
3014 *
3015 * In case BR/EDR has been disabled on a dual-mode controller and
3016 * userspace has configured a static address, then that address
3017 * becomes the identity address instead of the public BR/EDR address.
3018 */
3019 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3020 u8 *bdaddr_type)
3021 {
3022 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3023 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3024 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3025 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3026 bacpy(bdaddr, &hdev->static_addr);
3027 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3028 } else {
3029 bacpy(bdaddr, &hdev->bdaddr);
3030 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3031 }
3032 }
3033
3034 /* Alloc HCI device */
3035 struct hci_dev *hci_alloc_dev(void)
3036 {
3037 struct hci_dev *hdev;
3038
3039 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3040 if (!hdev)
3041 return NULL;
3042
3043 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3044 hdev->esco_type = (ESCO_HV1);
3045 hdev->link_mode = (HCI_LM_ACCEPT);
3046 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3047 hdev->io_capability = 0x03; /* No Input No Output */
3048 hdev->manufacturer = 0xffff; /* Default to internal use */
3049 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3050 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3051
3052 hdev->sniff_max_interval = 800;
3053 hdev->sniff_min_interval = 80;
3054
3055 hdev->le_adv_channel_map = 0x07;
3056 hdev->le_adv_min_interval = 0x0800;
3057 hdev->le_adv_max_interval = 0x0800;
3058 hdev->le_scan_interval = 0x0060;
3059 hdev->le_scan_window = 0x0030;
3060 hdev->le_conn_min_interval = 0x0028;
3061 hdev->le_conn_max_interval = 0x0038;
3062 hdev->le_conn_latency = 0x0000;
3063 hdev->le_supv_timeout = 0x002a;
3064 hdev->le_def_tx_len = 0x001b;
3065 hdev->le_def_tx_time = 0x0148;
3066 hdev->le_max_tx_len = 0x001b;
3067 hdev->le_max_tx_time = 0x0148;
3068 hdev->le_max_rx_len = 0x001b;
3069 hdev->le_max_rx_time = 0x0148;
3070
3071 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3072 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3073 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3074 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3075
3076 mutex_init(&hdev->lock);
3077 mutex_init(&hdev->req_lock);
3078
3079 INIT_LIST_HEAD(&hdev->mgmt_pending);
3080 INIT_LIST_HEAD(&hdev->blacklist);
3081 INIT_LIST_HEAD(&hdev->whitelist);
3082 INIT_LIST_HEAD(&hdev->uuids);
3083 INIT_LIST_HEAD(&hdev->link_keys);
3084 INIT_LIST_HEAD(&hdev->long_term_keys);
3085 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3086 INIT_LIST_HEAD(&hdev->remote_oob_data);
3087 INIT_LIST_HEAD(&hdev->le_white_list);
3088 INIT_LIST_HEAD(&hdev->le_conn_params);
3089 INIT_LIST_HEAD(&hdev->pend_le_conns);
3090 INIT_LIST_HEAD(&hdev->pend_le_reports);
3091 INIT_LIST_HEAD(&hdev->conn_hash.list);
3092
3093 INIT_WORK(&hdev->rx_work, hci_rx_work);
3094 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3095 INIT_WORK(&hdev->tx_work, hci_tx_work);
3096 INIT_WORK(&hdev->power_on, hci_power_on);
3097 INIT_WORK(&hdev->error_reset, hci_error_reset);
3098
3099 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3100 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3101 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3102 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3103
3104 skb_queue_head_init(&hdev->rx_q);
3105 skb_queue_head_init(&hdev->cmd_q);
3106 skb_queue_head_init(&hdev->raw_q);
3107
3108 init_waitqueue_head(&hdev->req_wait_q);
3109
3110 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3111
3112 hci_init_sysfs(hdev);
3113 discovery_init(hdev);
3114
3115 return hdev;
3116 }
3117 EXPORT_SYMBOL(hci_alloc_dev);
3118
3119 /* Free HCI device */
3120 void hci_free_dev(struct hci_dev *hdev)
3121 {
3122 /* will free via device release */
3123 put_device(&hdev->dev);
3124 }
3125 EXPORT_SYMBOL(hci_free_dev);
3126
3127 /* Register HCI device */
3128 int hci_register_dev(struct hci_dev *hdev)
3129 {
3130 int id, error;
3131
3132 if (!hdev->open || !hdev->close || !hdev->send)
3133 return -EINVAL;
3134
3135 /* Do not allow HCI_AMP devices to register at index 0,
3136 * so the index can be used as the AMP controller ID.
3137 */
3138 switch (hdev->dev_type) {
3139 case HCI_BREDR:
3140 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3141 break;
3142 case HCI_AMP:
3143 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3144 break;
3145 default:
3146 return -EINVAL;
3147 }
3148
3149 if (id < 0)
3150 return id;
3151
3152 sprintf(hdev->name, "hci%d", id);
3153 hdev->id = id;
3154
3155 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3156
3157 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3158 WQ_MEM_RECLAIM, 1, hdev->name);
3159 if (!hdev->workqueue) {
3160 error = -ENOMEM;
3161 goto err;
3162 }
3163
3164 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3165 WQ_MEM_RECLAIM, 1, hdev->name);
3166 if (!hdev->req_workqueue) {
3167 destroy_workqueue(hdev->workqueue);
3168 error = -ENOMEM;
3169 goto err;
3170 }
3171
3172 if (!IS_ERR_OR_NULL(bt_debugfs))
3173 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3174
3175 dev_set_name(&hdev->dev, "%s", hdev->name);
3176
3177 error = device_add(&hdev->dev);
3178 if (error < 0)
3179 goto err_wqueue;
3180
3181 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3182 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3183 hdev);
3184 if (hdev->rfkill) {
3185 if (rfkill_register(hdev->rfkill) < 0) {
3186 rfkill_destroy(hdev->rfkill);
3187 hdev->rfkill = NULL;
3188 }
3189 }
3190
3191 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3192 hci_dev_set_flag(hdev, HCI_RFKILLED);
3193
3194 hci_dev_set_flag(hdev, HCI_SETUP);
3195 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3196
3197 if (hdev->dev_type == HCI_BREDR) {
3198 /* Assume BR/EDR support until proven otherwise (such as
3199 		 * through reading supported features during init).
3200 */
3201 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3202 }
3203
3204 write_lock(&hci_dev_list_lock);
3205 list_add(&hdev->list, &hci_dev_list);
3206 write_unlock(&hci_dev_list_lock);
3207
3208 /* Devices that are marked for raw-only usage are unconfigured
3209 * and should not be included in normal operation.
3210 */
3211 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3212 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3213
3214 hci_notify(hdev, HCI_DEV_REG);
3215 hci_dev_hold(hdev);
3216
3217 queue_work(hdev->req_workqueue, &hdev->power_on);
3218
3219 return id;
3220
3221 err_wqueue:
3222 destroy_workqueue(hdev->workqueue);
3223 destroy_workqueue(hdev->req_workqueue);
3224 err:
3225 ida_simple_remove(&hci_index_ida, hdev->id);
3226
3227 return error;
3228 }
3229 EXPORT_SYMBOL(hci_register_dev);
3230
3231 /* Unregister HCI device */
3232 void hci_unregister_dev(struct hci_dev *hdev)
3233 {
3234 int i, id;
3235
3236 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3237
3238 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3239
3240 id = hdev->id;
3241
3242 write_lock(&hci_dev_list_lock);
3243 list_del(&hdev->list);
3244 write_unlock(&hci_dev_list_lock);
3245
3246 hci_dev_do_close(hdev);
3247
3248 for (i = 0; i < NUM_REASSEMBLY; i++)
3249 kfree_skb(hdev->reassembly[i]);
3250
3251 cancel_work_sync(&hdev->power_on);
3252
3253 if (!test_bit(HCI_INIT, &hdev->flags) &&
3254 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3255 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3256 hci_dev_lock(hdev);
3257 mgmt_index_removed(hdev);
3258 hci_dev_unlock(hdev);
3259 }
3260
3261 /* mgmt_index_removed should take care of emptying the
3262 * pending list */
3263 BUG_ON(!list_empty(&hdev->mgmt_pending));
3264
3265 hci_notify(hdev, HCI_DEV_UNREG);
3266
3267 if (hdev->rfkill) {
3268 rfkill_unregister(hdev->rfkill);
3269 rfkill_destroy(hdev->rfkill);
3270 }
3271
3272 device_del(&hdev->dev);
3273
3274 debugfs_remove_recursive(hdev->debugfs);
3275
3276 destroy_workqueue(hdev->workqueue);
3277 destroy_workqueue(hdev->req_workqueue);
3278
3279 hci_dev_lock(hdev);
3280 hci_bdaddr_list_clear(&hdev->blacklist);
3281 hci_bdaddr_list_clear(&hdev->whitelist);
3282 hci_uuids_clear(hdev);
3283 hci_link_keys_clear(hdev);
3284 hci_smp_ltks_clear(hdev);
3285 hci_smp_irks_clear(hdev);
3286 hci_remote_oob_data_clear(hdev);
3287 hci_bdaddr_list_clear(&hdev->le_white_list);
3288 hci_conn_params_clear_all(hdev);
3289 hci_discovery_filter_clear(hdev);
3290 hci_dev_unlock(hdev);
3291
3292 hci_dev_put(hdev);
3293
3294 ida_simple_remove(&hci_index_ida, id);
3295 }
3296 EXPORT_SYMBOL(hci_unregister_dev);
3297
3298 /* Suspend HCI device */
3299 int hci_suspend_dev(struct hci_dev *hdev)
3300 {
3301 hci_notify(hdev, HCI_DEV_SUSPEND);
3302 return 0;
3303 }
3304 EXPORT_SYMBOL(hci_suspend_dev);
3305
3306 /* Resume HCI device */
3307 int hci_resume_dev(struct hci_dev *hdev)
3308 {
3309 hci_notify(hdev, HCI_DEV_RESUME);
3310 return 0;
3311 }
3312 EXPORT_SYMBOL(hci_resume_dev);
3313
3314 /* Reset HCI device */
3315 int hci_reset_dev(struct hci_dev *hdev)
3316 {
3317 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3318 struct sk_buff *skb;
3319
3320 skb = bt_skb_alloc(3, GFP_ATOMIC);
3321 if (!skb)
3322 return -ENOMEM;
3323
3324 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3325 memcpy(skb_put(skb, 3), hw_err, 3);
3326
3327 /* Send Hardware Error to upper stack */
3328 return hci_recv_frame(hdev, skb);
3329 }
3330 EXPORT_SYMBOL(hci_reset_dev);
3331
3332 /* Receive frame from HCI drivers */
3333 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3334 {
3335 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3336 && !test_bit(HCI_INIT, &hdev->flags))) {
3337 kfree_skb(skb);
3338 return -ENXIO;
3339 }
3340
3341 /* Incoming skb */
3342 bt_cb(skb)->incoming = 1;
3343
3344 /* Time stamp */
3345 __net_timestamp(skb);
3346
3347 skb_queue_tail(&hdev->rx_q, skb);
3348 queue_work(hdev->workqueue, &hdev->rx_work);
3349
3350 return 0;
3351 }
3352 EXPORT_SYMBOL(hci_recv_frame);
3353
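/* Reassemble a single HCI packet of the given type from a raw byte
 * stream fragment. Returns the number of bytes left unconsumed, or a
 * negative error code.
 */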
3354 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3355 int count, __u8 index)
3356 {
3357 int len = 0;
3358 int hlen = 0;
3359 int remain = count;
3360 struct sk_buff *skb;
3361 struct bt_skb_cb *scb;
3362
3363 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3364 index >= NUM_REASSEMBLY)
3365 return -EILSEQ;
3366
3367 skb = hdev->reassembly[index];
3368
3369 if (!skb) {
3370 switch (type) {
3371 case HCI_ACLDATA_PKT:
3372 len = HCI_MAX_FRAME_SIZE;
3373 hlen = HCI_ACL_HDR_SIZE;
3374 break;
3375 case HCI_EVENT_PKT:
3376 len = HCI_MAX_EVENT_SIZE;
3377 hlen = HCI_EVENT_HDR_SIZE;
3378 break;
3379 case HCI_SCODATA_PKT:
3380 len = HCI_MAX_SCO_SIZE;
3381 hlen = HCI_SCO_HDR_SIZE;
3382 break;
3383 }
3384
3385 skb = bt_skb_alloc(len, GFP_ATOMIC);
3386 if (!skb)
3387 return -ENOMEM;
3388
3389 scb = (void *) skb->cb;
3390 scb->expect = hlen;
3391 scb->pkt_type = type;
3392
3393 hdev->reassembly[index] = skb;
3394 }
3395
3396 while (count) {
3397 scb = (void *) skb->cb;
3398 len = min_t(uint, scb->expect, count);
3399
3400 memcpy(skb_put(skb, len), data, len);
3401
3402 count -= len;
3403 data += len;
3404 scb->expect -= len;
3405 remain = count;
3406
3407 switch (type) {
3408 case HCI_EVENT_PKT:
3409 if (skb->len == HCI_EVENT_HDR_SIZE) {
3410 struct hci_event_hdr *h = hci_event_hdr(skb);
3411 scb->expect = h->plen;
3412
3413 if (skb_tailroom(skb) < scb->expect) {
3414 kfree_skb(skb);
3415 hdev->reassembly[index] = NULL;
3416 return -ENOMEM;
3417 }
3418 }
3419 break;
3420
3421 case HCI_ACLDATA_PKT:
3422 if (skb->len == HCI_ACL_HDR_SIZE) {
3423 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3424 scb->expect = __le16_to_cpu(h->dlen);
3425
3426 if (skb_tailroom(skb) < scb->expect) {
3427 kfree_skb(skb);
3428 hdev->reassembly[index] = NULL;
3429 return -ENOMEM;
3430 }
3431 }
3432 break;
3433
3434 case HCI_SCODATA_PKT:
3435 if (skb->len == HCI_SCO_HDR_SIZE) {
3436 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3437 scb->expect = h->dlen;
3438
3439 if (skb_tailroom(skb) < scb->expect) {
3440 kfree_skb(skb);
3441 hdev->reassembly[index] = NULL;
3442 return -ENOMEM;
3443 }
3444 }
3445 break;
3446 }
3447
3448 if (scb->expect == 0) {
3449 /* Complete frame */
3450
3451 bt_cb(skb)->pkt_type = type;
3452 hci_recv_frame(hdev, skb);
3453
3454 hdev->reassembly[index] = NULL;
3455 return remain;
3456 }
3457 }
3458
3459 return remain;
3460 }
3461
3462 #define STREAM_REASSEMBLY 0
3463
3464 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3465 {
3466 int type;
3467 int rem = 0;
3468
3469 while (count) {
3470 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3471
3472 if (!skb) {
3473 struct { char type; } *pkt;
3474
3475 /* Start of the frame */
3476 pkt = data;
3477 type = pkt->type;
3478
3479 data++;
3480 count--;
3481 } else
3482 type = bt_cb(skb)->pkt_type;
3483
3484 rem = hci_reassembly(hdev, type, data, count,
3485 STREAM_REASSEMBLY);
3486 if (rem < 0)
3487 return rem;
3488
3489 data += (count - rem);
3490 count = rem;
3491 }
3492
3493 return rem;
3494 }
3495 EXPORT_SYMBOL(hci_recv_stream_fragment);
3496
3497 /* ---- Interface to upper protocols ---- */
3498
3499 int hci_register_cb(struct hci_cb *cb)
3500 {
3501 BT_DBG("%p name %s", cb, cb->name);
3502
3503 mutex_lock(&hci_cb_list_lock);
3504 list_add_tail(&cb->list, &hci_cb_list);
3505 mutex_unlock(&hci_cb_list_lock);
3506
3507 return 0;
3508 }
3509 EXPORT_SYMBOL(hci_register_cb);
3510
3511 int hci_unregister_cb(struct hci_cb *cb)
3512 {
3513 BT_DBG("%p name %s", cb, cb->name);
3514
3515 mutex_lock(&hci_cb_list_lock);
3516 list_del(&cb->list);
3517 mutex_unlock(&hci_cb_list_lock);
3518
3519 return 0;
3520 }
3521 EXPORT_SYMBOL(hci_unregister_cb);
3522
3523 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3524 {
3525 int err;
3526
3527 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3528
3529 /* Time stamp */
3530 __net_timestamp(skb);
3531
3532 /* Send copy to monitor */
3533 hci_send_to_monitor(hdev, skb);
3534
3535 if (atomic_read(&hdev->promisc)) {
3536 /* Send copy to the sockets */
3537 hci_send_to_sock(hdev, skb);
3538 }
3539
3540 /* Get rid of skb owner, prior to sending to the driver. */
3541 skb_orphan(skb);
3542
3543 err = hdev->send(hdev, skb);
3544 if (err < 0) {
3545 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3546 kfree_skb(skb);
3547 }
3548 }
3549
3550 bool hci_req_pending(struct hci_dev *hdev)
3551 {
3552 return (hdev->req_status == HCI_REQ_PEND);
3553 }
3554
3555 /* Send HCI command */
3556 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3557 const void *param)
3558 {
3559 struct sk_buff *skb;
3560
3561 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3562
3563 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3564 if (!skb) {
3565 BT_ERR("%s no memory for command", hdev->name);
3566 return -ENOMEM;
3567 }
3568
3569 /* Stand-alone HCI commands must be flagged as
3570 * single-command requests.
3571 */
3572 bt_cb(skb)->req_start = 1;
3573
3574 skb_queue_tail(&hdev->cmd_q, skb);
3575 queue_work(hdev->workqueue, &hdev->cmd_work);
3576
3577 return 0;
3578 }
3579
3580 /* Get data from the previously sent command */
3581 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3582 {
3583 struct hci_command_hdr *hdr;
3584
3585 if (!hdev->sent_cmd)
3586 return NULL;
3587
3588 hdr = (void *) hdev->sent_cmd->data;
3589
3590 if (hdr->opcode != cpu_to_le16(opcode))
3591 return NULL;
3592
3593 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3594
3595 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3596 }
3597
3598 /* Send ACL data */
3599 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3600 {
3601 struct hci_acl_hdr *hdr;
3602 int len = skb->len;
3603
3604 skb_push(skb, HCI_ACL_HDR_SIZE);
3605 skb_reset_transport_header(skb);
3606 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3607 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3608 hdr->dlen = cpu_to_le16(len);
3609 }
3610
3611 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3612 struct sk_buff *skb, __u16 flags)
3613 {
3614 struct hci_conn *conn = chan->conn;
3615 struct hci_dev *hdev = conn->hdev;
3616 struct sk_buff *list;
3617
3618 skb->len = skb_headlen(skb);
3619 skb->data_len = 0;
3620
3621 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3622
3623 switch (hdev->dev_type) {
3624 case HCI_BREDR:
3625 hci_add_acl_hdr(skb, conn->handle, flags);
3626 break;
3627 case HCI_AMP:
3628 hci_add_acl_hdr(skb, chan->handle, flags);
3629 break;
3630 default:
3631 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3632 return;
3633 }
3634
3635 list = skb_shinfo(skb)->frag_list;
3636 if (!list) {
3637 /* Non fragmented */
3638 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3639
3640 skb_queue_tail(queue, skb);
3641 } else {
3642 /* Fragmented */
3643 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3644
3645 skb_shinfo(skb)->frag_list = NULL;
3646
3647 /* Queue all fragments atomically. We need to use spin_lock_bh
3648 		 * here because of 6LoWPAN links: there this function is
3649 		 * called from softirq context, and using a normal spin
3650 		 * lock could cause deadlocks.
3651 */
3652 spin_lock_bh(&queue->lock);
3653
3654 __skb_queue_tail(queue, skb);
3655
3656 flags &= ~ACL_START;
3657 flags |= ACL_CONT;
3658 do {
3659 skb = list; list = list->next;
3660
3661 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3662 hci_add_acl_hdr(skb, conn->handle, flags);
3663
3664 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3665
3666 __skb_queue_tail(queue, skb);
3667 } while (list);
3668
3669 spin_unlock_bh(&queue->lock);
3670 }
3671 }
3672
3673 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3674 {
3675 struct hci_dev *hdev = chan->conn->hdev;
3676
3677 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3678
3679 hci_queue_acl(chan, &chan->data_q, skb, flags);
3680
3681 queue_work(hdev->workqueue, &hdev->tx_work);
3682 }
3683
3684 /* Send SCO data */
3685 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3686 {
3687 struct hci_dev *hdev = conn->hdev;
3688 struct hci_sco_hdr hdr;
3689
3690 BT_DBG("%s len %d", hdev->name, skb->len);
3691
3692 hdr.handle = cpu_to_le16(conn->handle);
3693 hdr.dlen = skb->len;
3694
3695 skb_push(skb, HCI_SCO_HDR_SIZE);
3696 skb_reset_transport_header(skb);
3697 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3698
3699 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3700
3701 skb_queue_tail(&conn->data_q, skb);
3702 queue_work(hdev->workqueue, &hdev->tx_work);
3703 }
3704
3705 /* ---- HCI TX task (outgoing data) ---- */
3706
3707 /* HCI Connection scheduler */
3708 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3709 int *quote)
3710 {
3711 struct hci_conn_hash *h = &hdev->conn_hash;
3712 struct hci_conn *conn = NULL, *c;
3713 unsigned int num = 0, min = ~0;
3714
3715 /* We don't have to lock device here. Connections are always
3716 * added and removed with TX task disabled. */
3717
3718 rcu_read_lock();
3719
3720 list_for_each_entry_rcu(c, &h->list, list) {
3721 if (c->type != type || skb_queue_empty(&c->data_q))
3722 continue;
3723
3724 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3725 continue;
3726
3727 num++;
3728
3729 if (c->sent < min) {
3730 min = c->sent;
3731 conn = c;
3732 }
3733
3734 if (hci_conn_num(hdev, type) == num)
3735 break;
3736 }
3737
3738 rcu_read_unlock();
3739
3740 if (conn) {
3741 int cnt, q;
3742
3743 switch (conn->type) {
3744 case ACL_LINK:
3745 cnt = hdev->acl_cnt;
3746 break;
3747 case SCO_LINK:
3748 case ESCO_LINK:
3749 cnt = hdev->sco_cnt;
3750 break;
3751 case LE_LINK:
3752 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3753 break;
3754 default:
3755 cnt = 0;
3756 BT_ERR("Unknown link type");
3757 }
3758
3759 q = cnt / num;
3760 *quote = q ? q : 1;
3761 } else
3762 *quote = 0;
3763
3764 BT_DBG("conn %p quote %d", conn, *quote);
3765 return conn;
3766 }
3767
3768 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3769 {
3770 struct hci_conn_hash *h = &hdev->conn_hash;
3771 struct hci_conn *c;
3772
3773 BT_ERR("%s link tx timeout", hdev->name);
3774
3775 rcu_read_lock();
3776
3777 /* Kill stalled connections */
3778 list_for_each_entry_rcu(c, &h->list, list) {
3779 if (c->type == type && c->sent) {
3780 BT_ERR("%s killing stalled connection %pMR",
3781 hdev->name, &c->dst);
3782 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3783 }
3784 }
3785
3786 rcu_read_unlock();
3787 }
3788
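/* Select the next channel to service for the given link type: among
 * channels with queued data on connected links, pick the highest
 * priority and, within that priority, the connection with the fewest
 * outstanding packets. Also compute the packet quote for this round.
 */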
3789 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3790 int *quote)
3791 {
3792 struct hci_conn_hash *h = &hdev->conn_hash;
3793 struct hci_chan *chan = NULL;
3794 unsigned int num = 0, min = ~0, cur_prio = 0;
3795 struct hci_conn *conn;
3796 int cnt, q, conn_num = 0;
3797
3798 BT_DBG("%s", hdev->name);
3799
3800 rcu_read_lock();
3801
3802 list_for_each_entry_rcu(conn, &h->list, list) {
3803 struct hci_chan *tmp;
3804
3805 if (conn->type != type)
3806 continue;
3807
3808 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3809 continue;
3810
3811 conn_num++;
3812
3813 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3814 struct sk_buff *skb;
3815
3816 if (skb_queue_empty(&tmp->data_q))
3817 continue;
3818
3819 skb = skb_peek(&tmp->data_q);
3820 if (skb->priority < cur_prio)
3821 continue;
3822
3823 if (skb->priority > cur_prio) {
3824 num = 0;
3825 min = ~0;
3826 cur_prio = skb->priority;
3827 }
3828
3829 num++;
3830
3831 if (conn->sent < min) {
3832 min = conn->sent;
3833 chan = tmp;
3834 }
3835 }
3836
3837 if (hci_conn_num(hdev, type) == conn_num)
3838 break;
3839 }
3840
3841 rcu_read_unlock();
3842
3843 if (!chan)
3844 return NULL;
3845
3846 switch (chan->conn->type) {
3847 case ACL_LINK:
3848 cnt = hdev->acl_cnt;
3849 break;
3850 case AMP_LINK:
3851 cnt = hdev->block_cnt;
3852 break;
3853 case SCO_LINK:
3854 case ESCO_LINK:
3855 cnt = hdev->sco_cnt;
3856 break;
3857 case LE_LINK:
3858 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3859 break;
3860 default:
3861 cnt = 0;
3862 BT_ERR("Unknown link type");
3863 }
3864
3865 q = cnt / num;
3866 *quote = q ? q : 1;
3867 BT_DBG("chan %p quote %d", chan, *quote);
3868 return chan;
3869 }
3870
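/* After a scheduling round, promote the head packet of every channel
 * that did not get to send anything, so that lower-priority channels
 * are not starved indefinitely.
 */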
3871 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3872 {
3873 struct hci_conn_hash *h = &hdev->conn_hash;
3874 struct hci_conn *conn;
3875 int num = 0;
3876
3877 BT_DBG("%s", hdev->name);
3878
3879 rcu_read_lock();
3880
3881 list_for_each_entry_rcu(conn, &h->list, list) {
3882 struct hci_chan *chan;
3883
3884 if (conn->type != type)
3885 continue;
3886
3887 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3888 continue;
3889
3890 num++;
3891
3892 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3893 struct sk_buff *skb;
3894
3895 if (chan->sent) {
3896 chan->sent = 0;
3897 continue;
3898 }
3899
3900 if (skb_queue_empty(&chan->data_q))
3901 continue;
3902
3903 skb = skb_peek(&chan->data_q);
3904 if (skb->priority >= HCI_PRIO_MAX - 1)
3905 continue;
3906
3907 skb->priority = HCI_PRIO_MAX - 1;
3908
3909 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3910 skb->priority);
3911 }
3912
3913 if (hci_conn_num(hdev, type) == num)
3914 break;
3915 }
3916
3917 rcu_read_unlock();
3918
3919 }
3920
3921 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3922 {
3923 /* Calculate count of blocks used by this packet */
3924 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3925 }
3926
3927 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3928 {
3929 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3930 /* ACL tx timeout must be longer than maximum
3931 * link supervision timeout (40.9 seconds) */
3932 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3933 HCI_ACL_TX_TIMEOUT))
3934 hci_link_tx_to(hdev, ACL_LINK);
3935 }
3936 }
3937
3938 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3939 {
3940 unsigned int cnt = hdev->acl_cnt;
3941 struct hci_chan *chan;
3942 struct sk_buff *skb;
3943 int quote;
3944
3945 __check_timeout(hdev, cnt);
3946
3947 while (hdev->acl_cnt &&
3948 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3949 u32 priority = (skb_peek(&chan->data_q))->priority;
3950 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3951 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3952 skb->len, skb->priority);
3953
3954 /* Stop if priority has changed */
3955 if (skb->priority < priority)
3956 break;
3957
3958 skb = skb_dequeue(&chan->data_q);
3959
3960 hci_conn_enter_active_mode(chan->conn,
3961 bt_cb(skb)->force_active);
3962
3963 hci_send_frame(hdev, skb);
3964 hdev->acl_last_tx = jiffies;
3965
3966 hdev->acl_cnt--;
3967 chan->sent++;
3968 chan->conn->sent++;
3969 }
3970 }
3971
3972 if (cnt != hdev->acl_cnt)
3973 hci_prio_recalculate(hdev, ACL_LINK);
3974 }
3975
3976 static void hci_sched_acl_blk(struct hci_dev *hdev)
3977 {
3978 unsigned int cnt = hdev->block_cnt;
3979 struct hci_chan *chan;
3980 struct sk_buff *skb;
3981 int quote;
3982 u8 type;
3983
3984 __check_timeout(hdev, cnt);
3985
3986 BT_DBG("%s", hdev->name);
3987
3988 if (hdev->dev_type == HCI_AMP)
3989 type = AMP_LINK;
3990 else
3991 type = ACL_LINK;
3992
3993 while (hdev->block_cnt > 0 &&
3994 (chan = hci_chan_sent(hdev, type, &quote))) {
3995 u32 priority = (skb_peek(&chan->data_q))->priority;
3996 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3997 int blocks;
3998
3999 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4000 skb->len, skb->priority);
4001
4002 /* Stop if priority has changed */
4003 if (skb->priority < priority)
4004 break;
4005
4006 skb = skb_dequeue(&chan->data_q);
4007
4008 blocks = __get_blocks(hdev, skb);
4009 if (blocks > hdev->block_cnt)
4010 return;
4011
4012 hci_conn_enter_active_mode(chan->conn,
4013 bt_cb(skb)->force_active);
4014
4015 hci_send_frame(hdev, skb);
4016 hdev->acl_last_tx = jiffies;
4017
4018 hdev->block_cnt -= blocks;
4019 quote -= blocks;
4020
4021 chan->sent += blocks;
4022 chan->conn->sent += blocks;
4023 }
4024 }
4025
4026 if (cnt != hdev->block_cnt)
4027 hci_prio_recalculate(hdev, type);
4028 }
4029
4030 static void hci_sched_acl(struct hci_dev *hdev)
4031 {
4032 BT_DBG("%s", hdev->name);
4033
4034 /* No ACL link over BR/EDR controller */
4035 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4036 return;
4037
4038 /* No AMP link over AMP controller */
4039 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4040 return;
4041
4042 switch (hdev->flow_ctl_mode) {
4043 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4044 hci_sched_acl_pkt(hdev);
4045 break;
4046
4047 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4048 hci_sched_acl_blk(hdev);
4049 break;
4050 }
4051 }
4052
4053 /* Schedule SCO */
4054 static void hci_sched_sco(struct hci_dev *hdev)
4055 {
4056 struct hci_conn *conn;
4057 struct sk_buff *skb;
4058 int quote;
4059
4060 BT_DBG("%s", hdev->name);
4061
4062 if (!hci_conn_num(hdev, SCO_LINK))
4063 return;
4064
4065 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4066 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4067 BT_DBG("skb %p len %d", skb, skb->len);
4068 hci_send_frame(hdev, skb);
4069
4070 conn->sent++;
4071 if (conn->sent == ~0)
4072 conn->sent = 0;
4073 }
4074 }
4075 }
4076
4077 static void hci_sched_esco(struct hci_dev *hdev)
4078 {
4079 struct hci_conn *conn;
4080 struct sk_buff *skb;
4081 int quote;
4082
4083 BT_DBG("%s", hdev->name);
4084
4085 if (!hci_conn_num(hdev, ESCO_LINK))
4086 return;
4087
4088 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4089 &quote))) {
4090 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4091 BT_DBG("skb %p len %d", skb, skb->len);
4092 hci_send_frame(hdev, skb);
4093
4094 conn->sent++;
4095 if (conn->sent == ~0)
4096 conn->sent = 0;
4097 }
4098 }
4099 }
4100
4101 static void hci_sched_le(struct hci_dev *hdev)
4102 {
4103 struct hci_chan *chan;
4104 struct sk_buff *skb;
4105 int quote, cnt, tmp;
4106
4107 BT_DBG("%s", hdev->name);
4108
4109 if (!hci_conn_num(hdev, LE_LINK))
4110 return;
4111
4112 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4113 /* LE tx timeout must be longer than maximum
4114 * link supervision timeout (40.9 seconds) */
4115 if (!hdev->le_cnt && hdev->le_pkts &&
4116 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4117 hci_link_tx_to(hdev, LE_LINK);
4118 }
4119
4120 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4121 tmp = cnt;
4122 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4123 u32 priority = (skb_peek(&chan->data_q))->priority;
4124 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4125 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4126 skb->len, skb->priority);
4127
4128 /* Stop if priority has changed */
4129 if (skb->priority < priority)
4130 break;
4131
4132 skb = skb_dequeue(&chan->data_q);
4133
4134 hci_send_frame(hdev, skb);
4135 hdev->le_last_tx = jiffies;
4136
4137 cnt--;
4138 chan->sent++;
4139 chan->conn->sent++;
4140 }
4141 }
4142
4143 if (hdev->le_pkts)
4144 hdev->le_cnt = cnt;
4145 else
4146 hdev->acl_cnt = cnt;
4147
4148 if (cnt != tmp)
4149 hci_prio_recalculate(hdev, LE_LINK);
4150 }
4151
4152 static void hci_tx_work(struct work_struct *work)
4153 {
4154 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4155 struct sk_buff *skb;
4156
4157 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4158 hdev->sco_cnt, hdev->le_cnt);
4159
4160 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4161 /* Schedule queues and send stuff to HCI driver */
4162 hci_sched_acl(hdev);
4163 hci_sched_sco(hdev);
4164 hci_sched_esco(hdev);
4165 hci_sched_le(hdev);
4166 }
4167
4168 /* Send next queued raw (unknown type) packet */
4169 while ((skb = skb_dequeue(&hdev->raw_q)))
4170 hci_send_frame(hdev, skb);
4171 }
4172
4173 /* ----- HCI RX task (incoming data processing) ----- */
4174
4175 /* ACL data packet */
4176 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4177 {
4178 struct hci_acl_hdr *hdr = (void *) skb->data;
4179 struct hci_conn *conn;
4180 __u16 handle, flags;
4181
4182 skb_pull(skb, HCI_ACL_HDR_SIZE);
4183
4184 handle = __le16_to_cpu(hdr->handle);
4185 flags = hci_flags(handle);
4186 handle = hci_handle(handle);
4187
4188 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4189 handle, flags);
4190
4191 hdev->stat.acl_rx++;
4192
4193 hci_dev_lock(hdev);
4194 conn = hci_conn_hash_lookup_handle(hdev, handle);
4195 hci_dev_unlock(hdev);
4196
4197 if (conn) {
4198 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4199
4200 /* Send to upper protocol */
4201 l2cap_recv_acldata(conn, skb, flags);
4202 return;
4203 } else {
4204 BT_ERR("%s ACL packet for unknown connection handle %d",
4205 hdev->name, handle);
4206 }
4207
4208 kfree_skb(skb);
4209 }
4210
4211 /* SCO data packet */
4212 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4213 {
4214 struct hci_sco_hdr *hdr = (void *) skb->data;
4215 struct hci_conn *conn;
4216 __u16 handle;
4217
4218 skb_pull(skb, HCI_SCO_HDR_SIZE);
4219
4220 handle = __le16_to_cpu(hdr->handle);
4221
4222 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4223
4224 hdev->stat.sco_rx++;
4225
4226 hci_dev_lock(hdev);
4227 conn = hci_conn_hash_lookup_handle(hdev, handle);
4228 hci_dev_unlock(hdev);
4229
4230 if (conn) {
4231 /* Send to upper protocol */
4232 sco_recv_scodata(conn, skb);
4233 return;
4234 } else {
4235 BT_ERR("%s SCO packet for unknown connection handle %d",
4236 hdev->name, handle);
4237 }
4238
4239 kfree_skb(skb);
4240 }
4241
4242 static bool hci_req_is_complete(struct hci_dev *hdev)
4243 {
4244 struct sk_buff *skb;
4245
4246 skb = skb_peek(&hdev->cmd_q);
4247 if (!skb)
4248 return true;
4249
4250 return bt_cb(skb)->req_start;
4251 }
4252
4253 static void hci_resend_last(struct hci_dev *hdev)
4254 {
4255 struct hci_command_hdr *sent;
4256 struct sk_buff *skb;
4257 u16 opcode;
4258
4259 if (!hdev->sent_cmd)
4260 return;
4261
4262 sent = (void *) hdev->sent_cmd->data;
4263 opcode = __le16_to_cpu(sent->opcode);
4264 if (opcode == HCI_OP_RESET)
4265 return;
4266
4267 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4268 if (!skb)
4269 return;
4270
4271 skb_queue_head(&hdev->cmd_q, skb);
4272 queue_work(hdev->workqueue, &hdev->cmd_work);
4273 }
4274
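/* Handle completion of a single command within a request: if the whole
 * request has finished (or failed), flush its remaining queued commands
 * and invoke the request's completion callback exactly once.
 */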
4275 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4276 {
4277 hci_req_complete_t req_complete = NULL;
4278 struct sk_buff *skb;
4279 unsigned long flags;
4280
4281 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4282
4283 /* If the completed command doesn't match the last one that was
4284 	 * sent, we need to do special handling of it.
4285 */
4286 if (!hci_sent_cmd_data(hdev, opcode)) {
4287 		/* Some CSR-based controllers generate a spontaneous
4288 		 * reset complete event during init, and any pending
4289 * command will never be completed. In such a case we
4290 * need to resend whatever was the last sent
4291 * command.
4292 */
4293 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4294 hci_resend_last(hdev);
4295
4296 return;
4297 }
4298
4299 	/* If the command succeeded and there are still more commands in
4300 	 * this request, the request is not yet complete.
4301 */
4302 if (!status && !hci_req_is_complete(hdev))
4303 return;
4304
4305 	/* If this was the last command in a request, the complete
4306 * callback would be found in hdev->sent_cmd instead of the
4307 * command queue (hdev->cmd_q).
4308 */
4309 if (hdev->sent_cmd) {
4310 req_complete = bt_cb(hdev->sent_cmd)->req_complete;
4311
4312 if (req_complete) {
4313 /* We must set the complete callback to NULL to
4314 * avoid calling the callback more than once if
4315 * this function gets called again.
4316 */
4317 bt_cb(hdev->sent_cmd)->req_complete = NULL;
4318
4319 goto call_complete;
4320 }
4321 }
4322
4323 /* Remove all pending commands belonging to this request */
4324 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4325 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4326 if (bt_cb(skb)->req_start) {
4327 __skb_queue_head(&hdev->cmd_q, skb);
4328 break;
4329 }
4330
4331 req_complete = bt_cb(skb)->req_complete;
4332 kfree_skb(skb);
4333 }
4334 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4335
4336 call_complete:
4337 if (req_complete)
4338 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4339 }
4340
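/* RX work: process the receive queue, mirroring each frame to the
 * monitor (and to raw sockets in promiscuous mode) before dispatching
 * events, ACL data and SCO data to their respective handlers.
 */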
4341 static void hci_rx_work(struct work_struct *work)
4342 {
4343 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4344 struct sk_buff *skb;
4345
4346 BT_DBG("%s", hdev->name);
4347
4348 while ((skb = skb_dequeue(&hdev->rx_q))) {
4349 /* Send copy to monitor */
4350 hci_send_to_monitor(hdev, skb);
4351
4352 if (atomic_read(&hdev->promisc)) {
4353 /* Send copy to the sockets */
4354 hci_send_to_sock(hdev, skb);
4355 }
4356
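		/* In user channel mode the frame belongs to user space;
		 * don't let the kernel stack process it any further.
		 */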
4357 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4358 kfree_skb(skb);
4359 continue;
4360 }
4361
4362 if (test_bit(HCI_INIT, &hdev->flags)) {
4363 			/* Don't process data packets in this state. */
4364 switch (bt_cb(skb)->pkt_type) {
4365 case HCI_ACLDATA_PKT:
4366 case HCI_SCODATA_PKT:
4367 kfree_skb(skb);
4368 continue;
4369 }
4370 }
4371
4372 /* Process frame */
4373 switch (bt_cb(skb)->pkt_type) {
4374 case HCI_EVENT_PKT:
4375 BT_DBG("%s Event packet", hdev->name);
4376 hci_event_packet(hdev, skb);
4377 break;
4378
4379 case HCI_ACLDATA_PKT:
4380 BT_DBG("%s ACL data packet", hdev->name);
4381 hci_acldata_packet(hdev, skb);
4382 break;
4383
4384 case HCI_SCODATA_PKT:
4385 BT_DBG("%s SCO data packet", hdev->name);
4386 hci_scodata_packet(hdev, skb);
4387 break;
4388
4389 default:
4390 kfree_skb(skb);
4391 break;
4392 }
4393 }
4394 }
4395
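/* Command work: if the controller has a command credit, send the next
 * queued command, keep a clone in hdev->sent_cmd and arm the command
 * timeout (skipped while a reset is in progress).
 */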
4396 static void hci_cmd_work(struct work_struct *work)
4397 {
4398 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4399 struct sk_buff *skb;
4400
4401 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4402 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4403
4404 /* Send queued commands */
4405 if (atomic_read(&hdev->cmd_cnt)) {
4406 skb = skb_dequeue(&hdev->cmd_q);
4407 if (!skb)
4408 return;
4409
4410 kfree_skb(hdev->sent_cmd);
4411
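		/* Keep a clone of the outgoing command so completion handling
		 * (and hci_resend_last) can still reference it later.
		 */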
4412 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4413 if (hdev->sent_cmd) {
4414 atomic_dec(&hdev->cmd_cnt);
4415 hci_send_frame(hdev, skb);
4416 if (test_bit(HCI_RESET, &hdev->flags))
4417 cancel_delayed_work(&hdev->cmd_timer);
4418 else
4419 schedule_delayed_work(&hdev->cmd_timer,
4420 HCI_CMD_TIMEOUT);
4421 } else {
4422 skb_queue_head(&hdev->cmd_q, skb);
4423 queue_work(hdev->workqueue, &hdev->cmd_work);
4424 }
4425 }
4426 }