/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
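/* Usage sketch: assuming debugfs is mounted at the usual /sys/kernel/debug
 * location, this entry appears as
 * /sys/kernel/debug/bluetooth/<hdev->name>/dut_mode. Reading returns 'Y' or
 * 'N'; writing a boolean string toggles Device Under Test mode, but only
 * while the controller is up.
 */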
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);

	return 0;
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
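/* Reference for the bit math above (HCI Set Event Mask): an event with HCI
 * event code N is controlled by bit (N - 1) % 8 of byte (N - 1) / 8 in the
 * 8-byte mask, which is how the events[] assignments map to the events named
 * in the comments.
 */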
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
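/* Rough shape of the staged init above: stage 1 resets the controller and
 * reads the basic identity, stage 2 configures BR/EDR vs. LE specifics such
 * as SSP and inquiry mode, stage 3 programs the event masks, link policy and
 * LE parameters, and stage 4 covers the optional commands; AMP controllers
 * stop after stage 2.
 */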
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
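/* These small request builders are not called directly; they are handed to
 * hci_req_sync()/__hci_req_sync() (see hci_dev_cmd() below), which runs them
 * with the request lock held and waits for the controller to complete the
 * queued commands.
 */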
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
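/* Note: every successful hci_dev_get() must be balanced by hci_dev_put()
 * once the caller is done with the device; the ioctl helpers below follow
 * that pattern.
 */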
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
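/* The returned flag word is what callers feed into mgmt_device_found():
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm/resolve the remote
 * name, and MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without SSP support.
 */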
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}

		/* If the HCI Reset command is clearing all diagnostic
		 * settings, then they need to be reprogrammed after the
		 * init procedure completed.
		 */
		if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
		    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
			ret = hdev->set_diag(hdev, true);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
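/* Power-on via the ioctl path or mgmt funnels through hci_dev_do_open()
 * above: the driver's open() and setup() callbacks run first, then the
 * staged __hci_init() sequence, and only if everything succeeds is HCI_UP
 * set and HCI_DEV_UP broadcast to monitor sockets.
 */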
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();

	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}

	rcu_read_unlock();

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
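/* In short: legacy keys, cross-transport SC-derived keys and combination
 * keys created when at least one side required bonding are stored; debug
 * combination keys and changed-combination keys with no previous key are
 * treated as session-only.
 */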
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}

	rcu_read_unlock();

	return NULL;
}
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}

	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}

	rcu_read_unlock();

	return NULL;
}
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
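
/* Summary of the data->present encoding used above: 0x01 means only the
 * P-192 hash/randomizer pair is stored, 0x02 means only the P-256 pair,
 * 0x03 means both pairs are stored, and 0x00 means neither.
 */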
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
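
/*
 * Illustrative driver-side sketch (not part of this file): a transport
 * driver allocates an hdev, fills in its mandatory callbacks and then
 * registers it, for example:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_VIRTUAL;	// or HCI_USB, HCI_UART, ...
 *	hdev->open  = my_open;		// hypothetical driver callbacks
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects an hdev with missing open/close/send
 * callbacks.
 */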
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
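
/* Calling convention for drivers: the packet type must already have been set
 * with hci_skb_pkt_type() before calling hci_recv_frame(), and the core takes
 * ownership of the skb in all cases - on error paths above it is freed here,
 * so the driver must not free it again.
 */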
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
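
/* hci_handle_pack() is a helper from the HCI header that combines the 12-bit
 * connection handle with the packet boundary and broadcast flag bits in the
 * top four bits of the 16-bit header field, roughly
 * (handle & 0x0fff) | (flags << 12).
 */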
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
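
/* Worked example with illustrative numbers: if hdev->block_len == 256 and
 * skb->len == 516, the payload after the 4-byte ACL header is 512 bytes and
 * DIV_ROUND_UP(512, 256) charges 2 blocks against hdev->block_cnt.
 */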
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
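
/* Each request marks only its first command with HCI_REQ_START, so when the
 * next command waiting in cmd_q carries that flag (or the queue is empty),
 * the request currently completing has no further commands outstanding.
 */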
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
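
/* Note on flow control: cmd_cnt acts as a credit counter. It is decremented
 * here for every command handed to the driver and set back to 1 by
 * hci_cmd_timeout() above; the command status/complete event handling
 * elsewhere in the stack replenishes it when the controller acknowledges a
 * command.
 */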