/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
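
/*
 * Illustrative sketch (not part of the original file): another kernel
 * module can subscribe to these device notifications with a standard
 * notifier_block. The names example_hci_event and example_hci_nb are
 * hypothetical.
 */
#if 0
static int example_hci_event(struct notifier_block *nb, unsigned long event,
								void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		pr_info("new HCI device %s\n", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
	.notifier_call = example_hci_event,
};

/* hci_register_notifier(&example_hci_nb) on module init,
 * hci_unregister_notifier(&example_hci_nb) on module exit. */
#endif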
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
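
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * request is a callback that queues one or more HCI commands, paired
 * with a call to hci_request(), which sleeps until hci_req_complete()
 * fires or the timeout expires. example_read_name_req is hypothetical.
 */
#if 0
static void example_read_name_req(struct hci_dev *hdev, unsigned long opt)
{
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

/* err = hci_request(hdev, example_read_name_req, 0,
 *			msecs_to_jiffies(HCI_INIT_TIMEOUT)); */
#endif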
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
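
/*
 * Illustrative sketch (not part of the original file): a transport
 * driver allocates an hci_dev, fills in its callbacks and registers
 * it. All example_* names are hypothetical; open, close, send and
 * destruct are the callbacks hci_register_dev() checks for below.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_USB;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
#endif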
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
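
/*
 * Illustrative sketch (not part of the original file): a UART or USB
 * transport that receives a packet in pieces hands each piece to
 * hci_recv_fragment() together with the packet type; the core
 * reassembles it and injects the complete frame via hci_recv_frame().
 * example_rx is hypothetical.
 */
#if 0
static void example_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
		BT_ERR("%s frame reassembly failed", hdev->name);
}
#endif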
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
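
/*
 * Illustrative sketch (not part of the original file): upper layers
 * such as L2CAP register an hci_cb to be called back on connection
 * events. example_cb is hypothetical, and which callback members
 * hci_cb provides is assumed here rather than confirmed by this file.
 */
#if 0
static struct hci_cb example_cb = {
	.name = "example",
	/* callback members, e.g. .security_cfm, would be set here */
};

/* hci_register_cb(&example_cb); / hci_unregister_cb(&example_cb); */
#endif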
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
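
/*
 * Illustrative sketch (not part of the original file): an event
 * handler for a command-complete event can recover the parameters of
 * the command it answers via hci_sent_cmd_data(); the result is NULL
 * when the last sent command had a different opcode. The handler name
 * is hypothetical.
 */
#if 0
static void example_write_scan_enable_complete(struct hci_dev *hdev)
{
	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	/* first (and only) parameter byte: the requested scan mode */
	BT_DBG("%s scan 0x%2.2x", hdev->name, *((__u8 *) sent));
}
#endif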
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
								int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
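
/*
 * Worked example (added for clarity, not in the original file): with
 * hdev->acl_cnt == 8 free controller buffers and num == 3 busy ACL
 * connections, the least-recently-served connection gets a quote of
 * 8 / 3 = 2 packets per scheduling round; when cnt / num rounds down
 * to 0, the quote is clamped to 1 so no connection is starved.
 */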
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}

static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");