/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50 #include <net/bluetooth/hci_mon.h>
52 static atomic_t monitor_promisc
= ATOMIC_INIT(0);
54 /* ----- HCI socket interface ----- */
56 static inline int hci_test_bit(int nr
, void *addr
)
58 return *((__u32
*) addr
+ (nr
>> 5)) & ((__u32
) 1 << (nr
& 31));
62 static struct hci_sec_filter hci_sec_filter
= {
66 { 0x1000d9fe, 0x0000b00c },
71 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
75 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
78 /* OGF_STATUS_PARAM */
79 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
83 static struct bt_sock_list hci_sk_list
= {
84 .lock
= __RW_LOCK_UNLOCKED(hci_sk_list
.lock
)
87 /* Send frame to RAW socket */
88 void hci_send_to_sock(struct hci_dev
*hdev
, struct sk_buff
*skb
)
91 struct hlist_node
*node
;
92 struct sk_buff
*skb_copy
= NULL
;
94 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
96 read_lock(&hci_sk_list
.lock
);
98 sk_for_each(sk
, node
, &hci_sk_list
.head
) {
99 struct hci_filter
*flt
;
100 struct sk_buff
*nskb
;
102 if (sk
->sk_state
!= BT_BOUND
|| hci_pi(sk
)->hdev
!= hdev
)
105 /* Don't send frame to the socket it came from */
109 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
)
113 flt
= &hci_pi(sk
)->filter
;
115 if (!test_bit((bt_cb(skb
)->pkt_type
== HCI_VENDOR_PKT
) ?
116 0 : (bt_cb(skb
)->pkt_type
& HCI_FLT_TYPE_BITS
), &flt
->type_mask
))
119 if (bt_cb(skb
)->pkt_type
== HCI_EVENT_PKT
) {
120 register int evt
= (*(__u8
*)skb
->data
& HCI_FLT_EVENT_BITS
);
122 if (!hci_test_bit(evt
, &flt
->event_mask
))
126 ((evt
== HCI_EV_CMD_COMPLETE
&&
128 get_unaligned((__le16
*)(skb
->data
+ 3))) ||
129 (evt
== HCI_EV_CMD_STATUS
&&
131 get_unaligned((__le16
*)(skb
->data
+ 4)))))
136 /* Create a private copy with headroom */
137 skb_copy
= __pskb_copy(skb
, 1, GFP_ATOMIC
);
141 /* Put type byte before the data */
142 memcpy(skb_push(skb_copy
, 1), &bt_cb(skb
)->pkt_type
, 1);
145 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
149 if (sock_queue_rcv_skb(sk
, nskb
))
153 read_unlock(&hci_sk_list
.lock
);
158 /* Send frame to control socket */
159 void hci_send_to_control(struct sk_buff
*skb
, struct sock
*skip_sk
)
162 struct hlist_node
*node
;
164 BT_DBG("len %d", skb
->len
);
166 read_lock(&hci_sk_list
.lock
);
168 sk_for_each(sk
, node
, &hci_sk_list
.head
) {
169 struct sk_buff
*nskb
;
171 /* Skip the original socket */
175 if (sk
->sk_state
!= BT_BOUND
)
178 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_CONTROL
)
181 nskb
= skb_clone(skb
, GFP_ATOMIC
);
185 if (sock_queue_rcv_skb(sk
, nskb
))
189 read_unlock(&hci_sk_list
.lock
);
192 /* Send frame to monitor socket */
193 void hci_send_to_monitor(struct hci_dev
*hdev
, struct sk_buff
*skb
)
196 struct hlist_node
*node
;
197 struct sk_buff
*skb_copy
= NULL
;
200 if (!atomic_read(&monitor_promisc
))
203 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
205 switch (bt_cb(skb
)->pkt_type
) {
206 case HCI_COMMAND_PKT
:
207 opcode
= __constant_cpu_to_le16(HCI_MON_COMMAND_PKT
);
210 opcode
= __constant_cpu_to_le16(HCI_MON_EVENT_PKT
);
212 case HCI_ACLDATA_PKT
:
213 if (bt_cb(skb
)->incoming
)
214 opcode
= __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT
);
216 opcode
= __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT
);
218 case HCI_SCODATA_PKT
:
219 if (bt_cb(skb
)->incoming
)
220 opcode
= __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT
);
222 opcode
= __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT
);
228 read_lock(&hci_sk_list
.lock
);
230 sk_for_each(sk
, node
, &hci_sk_list
.head
) {
231 struct sk_buff
*nskb
;
233 if (sk
->sk_state
!= BT_BOUND
)
236 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
240 struct hci_mon_hdr
*hdr
;
242 /* Create a private copy with headroom */
243 skb_copy
= __pskb_copy(skb
, HCI_MON_HDR_SIZE
, GFP_ATOMIC
);
247 /* Put header before the data */
248 hdr
= (void *) skb_push(skb_copy
, HCI_MON_HDR_SIZE
);
249 hdr
->opcode
= opcode
;
250 hdr
->index
= cpu_to_le16(hdev
->id
);
251 hdr
->len
= cpu_to_le16(skb
->len
);
254 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
258 if (sock_queue_rcv_skb(sk
, nskb
))
262 read_unlock(&hci_sk_list
.lock
);
267 static void send_monitor_event(struct sk_buff
*skb
)
270 struct hlist_node
*node
;
272 BT_DBG("len %d", skb
->len
);
274 read_lock(&hci_sk_list
.lock
);
276 sk_for_each(sk
, node
, &hci_sk_list
.head
) {
277 struct sk_buff
*nskb
;
279 if (sk
->sk_state
!= BT_BOUND
)
282 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
285 nskb
= skb_clone(skb
, GFP_ATOMIC
);
289 if (sock_queue_rcv_skb(sk
, nskb
))
293 read_unlock(&hci_sk_list
.lock
);
296 static struct sk_buff
*create_monitor_event(struct hci_dev
*hdev
, int event
)
298 struct hci_mon_hdr
*hdr
;
299 struct hci_mon_new_index
*ni
;
305 skb
= bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE
, GFP_ATOMIC
);
309 ni
= (void *) skb_put(skb
, HCI_MON_NEW_INDEX_SIZE
);
310 ni
->type
= hdev
->dev_type
;
312 bacpy(&ni
->bdaddr
, &hdev
->bdaddr
);
313 memcpy(ni
->name
, hdev
->name
, 8);
315 opcode
= __constant_cpu_to_le16(HCI_MON_NEW_INDEX
);
319 skb
= bt_skb_alloc(0, GFP_ATOMIC
);
323 opcode
= __constant_cpu_to_le16(HCI_MON_DEL_INDEX
);
330 __net_timestamp(skb
);
332 hdr
= (void *) skb_push(skb
, HCI_MON_HDR_SIZE
);
333 hdr
->opcode
= opcode
;
334 hdr
->index
= cpu_to_le16(hdev
->id
);
335 hdr
->len
= cpu_to_le16(skb
->len
- HCI_MON_HDR_SIZE
);
340 static void send_monitor_replay(struct sock
*sk
)
342 struct hci_dev
*hdev
;
344 read_lock(&hci_dev_list_lock
);
346 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
349 skb
= create_monitor_event(hdev
, HCI_DEV_REG
);
353 if (sock_queue_rcv_skb(sk
, skb
))
357 read_unlock(&hci_dev_list_lock
);
360 /* Generate internal stack event */
361 static void hci_si_event(struct hci_dev
*hdev
, int type
, int dlen
, void *data
)
363 struct hci_event_hdr
*hdr
;
364 struct hci_ev_stack_internal
*ev
;
367 skb
= bt_skb_alloc(HCI_EVENT_HDR_SIZE
+ sizeof(*ev
) + dlen
, GFP_ATOMIC
);
371 hdr
= (void *) skb_put(skb
, HCI_EVENT_HDR_SIZE
);
372 hdr
->evt
= HCI_EV_STACK_INTERNAL
;
373 hdr
->plen
= sizeof(*ev
) + dlen
;
375 ev
= (void *) skb_put(skb
, sizeof(*ev
) + dlen
);
377 memcpy(ev
->data
, data
, dlen
);
379 bt_cb(skb
)->incoming
= 1;
380 __net_timestamp(skb
);
382 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
383 skb
->dev
= (void *) hdev
;
384 hci_send_to_sock(hdev
, skb
);
388 void hci_sock_dev_event(struct hci_dev
*hdev
, int event
)
390 struct hci_ev_si_device ev
;
392 BT_DBG("hdev %s event %d", hdev
->name
, event
);
394 /* Send event to monitor */
395 if (atomic_read(&monitor_promisc
)) {
398 skb
= create_monitor_event(hdev
, event
);
400 send_monitor_event(skb
);
405 /* Send event to sockets */
407 ev
.dev_id
= hdev
->id
;
408 hci_si_event(NULL
, HCI_EV_SI_DEVICE
, sizeof(ev
), &ev
);
410 if (event
== HCI_DEV_UNREG
) {
412 struct hlist_node
*node
;
414 /* Detach sockets from device */
415 read_lock(&hci_sk_list
.lock
);
416 sk_for_each(sk
, node
, &hci_sk_list
.head
) {
417 bh_lock_sock_nested(sk
);
418 if (hci_pi(sk
)->hdev
== hdev
) {
419 hci_pi(sk
)->hdev
= NULL
;
421 sk
->sk_state
= BT_OPEN
;
422 sk
->sk_state_change(sk
);
428 read_unlock(&hci_sk_list
.lock
);
432 static int hci_sock_release(struct socket
*sock
)
434 struct sock
*sk
= sock
->sk
;
435 struct hci_dev
*hdev
;
437 BT_DBG("sock %p sk %p", sock
, sk
);
442 hdev
= hci_pi(sk
)->hdev
;
444 if (hci_pi(sk
)->channel
== HCI_CHANNEL_MONITOR
)
445 atomic_dec(&monitor_promisc
);
447 bt_sock_unlink(&hci_sk_list
, sk
);
450 atomic_dec(&hdev
->promisc
);
456 skb_queue_purge(&sk
->sk_receive_queue
);
457 skb_queue_purge(&sk
->sk_write_queue
);
463 static int hci_sock_blacklist_add(struct hci_dev
*hdev
, void __user
*arg
)
468 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
473 err
= hci_blacklist_add(hdev
, &bdaddr
, 0);
475 hci_dev_unlock(hdev
);
480 static int hci_sock_blacklist_del(struct hci_dev
*hdev
, void __user
*arg
)
485 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
490 err
= hci_blacklist_del(hdev
, &bdaddr
, 0);
492 hci_dev_unlock(hdev
);
497 /* Ioctls that require bound socket */
498 static inline int hci_sock_bound_ioctl(struct sock
*sk
, unsigned int cmd
, unsigned long arg
)
500 struct hci_dev
*hdev
= hci_pi(sk
)->hdev
;
507 if (!capable(CAP_NET_ADMIN
))
510 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
514 set_bit(HCI_RAW
, &hdev
->flags
);
516 clear_bit(HCI_RAW
, &hdev
->flags
);
521 return hci_get_conn_info(hdev
, (void __user
*) arg
);
524 return hci_get_auth_info(hdev
, (void __user
*) arg
);
527 if (!capable(CAP_NET_ADMIN
))
529 return hci_sock_blacklist_add(hdev
, (void __user
*) arg
);
532 if (!capable(CAP_NET_ADMIN
))
534 return hci_sock_blacklist_del(hdev
, (void __user
*) arg
);
538 return hdev
->ioctl(hdev
, cmd
, arg
);
543 static int hci_sock_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
545 struct sock
*sk
= sock
->sk
;
546 void __user
*argp
= (void __user
*) arg
;
549 BT_DBG("cmd %x arg %lx", cmd
, arg
);
553 return hci_get_dev_list(argp
);
556 return hci_get_dev_info(argp
);
559 return hci_get_conn_list(argp
);
562 if (!capable(CAP_NET_ADMIN
))
564 return hci_dev_open(arg
);
567 if (!capable(CAP_NET_ADMIN
))
569 return hci_dev_close(arg
);
572 if (!capable(CAP_NET_ADMIN
))
574 return hci_dev_reset(arg
);
577 if (!capable(CAP_NET_ADMIN
))
579 return hci_dev_reset_stat(arg
);
589 if (!capable(CAP_NET_ADMIN
))
591 return hci_dev_cmd(cmd
, argp
);
594 return hci_inquiry(argp
);
598 err
= hci_sock_bound_ioctl(sk
, cmd
, arg
);
604 static int hci_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int addr_len
)
606 struct sockaddr_hci haddr
;
607 struct sock
*sk
= sock
->sk
;
608 struct hci_dev
*hdev
= NULL
;
611 BT_DBG("sock %p sk %p", sock
, sk
);
616 memset(&haddr
, 0, sizeof(haddr
));
617 len
= min_t(unsigned int, sizeof(haddr
), addr_len
);
618 memcpy(&haddr
, addr
, len
);
620 if (haddr
.hci_family
!= AF_BLUETOOTH
)
625 if (sk
->sk_state
== BT_BOUND
) {
630 switch (haddr
.hci_channel
) {
631 case HCI_CHANNEL_RAW
:
632 if (hci_pi(sk
)->hdev
) {
637 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
638 hdev
= hci_dev_get(haddr
.hci_dev
);
644 atomic_inc(&hdev
->promisc
);
647 hci_pi(sk
)->hdev
= hdev
;
650 case HCI_CHANNEL_CONTROL
:
651 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
656 if (!capable(CAP_NET_ADMIN
)) {
663 case HCI_CHANNEL_MONITOR
:
664 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
669 if (!capable(CAP_NET_RAW
)) {
674 send_monitor_replay(sk
);
676 atomic_inc(&monitor_promisc
);
685 hci_pi(sk
)->channel
= haddr
.hci_channel
;
686 sk
->sk_state
= BT_BOUND
;
693 static int hci_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *addr_len
, int peer
)
695 struct sockaddr_hci
*haddr
= (struct sockaddr_hci
*) addr
;
696 struct sock
*sk
= sock
->sk
;
697 struct hci_dev
*hdev
= hci_pi(sk
)->hdev
;
699 BT_DBG("sock %p sk %p", sock
, sk
);
706 *addr_len
= sizeof(*haddr
);
707 haddr
->hci_family
= AF_BLUETOOTH
;
708 haddr
->hci_dev
= hdev
->id
;
714 static inline void hci_sock_cmsg(struct sock
*sk
, struct msghdr
*msg
, struct sk_buff
*skb
)
716 __u32 mask
= hci_pi(sk
)->cmsg_mask
;
718 if (mask
& HCI_CMSG_DIR
) {
719 int incoming
= bt_cb(skb
)->incoming
;
720 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_DIR
, sizeof(incoming
), &incoming
);
723 if (mask
& HCI_CMSG_TSTAMP
) {
725 struct compat_timeval ctv
;
731 skb_get_timestamp(skb
, &tv
);
736 if (!COMPAT_USE_64BIT_TIME
&&
737 (msg
->msg_flags
& MSG_CMSG_COMPAT
)) {
738 ctv
.tv_sec
= tv
.tv_sec
;
739 ctv
.tv_usec
= tv
.tv_usec
;
745 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_TSTAMP
, len
, data
);
749 static int hci_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
750 struct msghdr
*msg
, size_t len
, int flags
)
752 int noblock
= flags
& MSG_DONTWAIT
;
753 struct sock
*sk
= sock
->sk
;
757 BT_DBG("sock %p, sk %p", sock
, sk
);
759 if (flags
& (MSG_OOB
))
762 if (sk
->sk_state
== BT_CLOSED
)
765 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
769 msg
->msg_namelen
= 0;
773 msg
->msg_flags
|= MSG_TRUNC
;
777 skb_reset_transport_header(skb
);
778 err
= skb_copy_datagram_iovec(skb
, 0, msg
->msg_iov
, copied
);
780 switch (hci_pi(sk
)->channel
) {
781 case HCI_CHANNEL_RAW
:
782 hci_sock_cmsg(sk
, msg
, skb
);
784 case HCI_CHANNEL_CONTROL
:
785 case HCI_CHANNEL_MONITOR
:
786 sock_recv_timestamp(msg
, sk
, skb
);
790 skb_free_datagram(sk
, skb
);
792 return err
? : copied
;
795 static int hci_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
796 struct msghdr
*msg
, size_t len
)
798 struct sock
*sk
= sock
->sk
;
799 struct hci_dev
*hdev
;
803 BT_DBG("sock %p sk %p", sock
, sk
);
805 if (msg
->msg_flags
& MSG_OOB
)
808 if (msg
->msg_flags
& ~(MSG_DONTWAIT
|MSG_NOSIGNAL
|MSG_ERRQUEUE
))
811 if (len
< 4 || len
> HCI_MAX_FRAME_SIZE
)
816 switch (hci_pi(sk
)->channel
) {
817 case HCI_CHANNEL_RAW
:
819 case HCI_CHANNEL_CONTROL
:
820 err
= mgmt_control(sk
, msg
, len
);
822 case HCI_CHANNEL_MONITOR
:
830 hdev
= hci_pi(sk
)->hdev
;
836 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
841 skb
= bt_skb_send_alloc(sk
, len
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
845 if (memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
)) {
850 bt_cb(skb
)->pkt_type
= *((unsigned char *) skb
->data
);
852 skb
->dev
= (void *) hdev
;
854 if (bt_cb(skb
)->pkt_type
== HCI_COMMAND_PKT
) {
855 u16 opcode
= get_unaligned_le16(skb
->data
);
856 u16 ogf
= hci_opcode_ogf(opcode
);
857 u16 ocf
= hci_opcode_ocf(opcode
);
859 if (((ogf
> HCI_SFLT_MAX_OGF
) ||
860 !hci_test_bit(ocf
& HCI_FLT_OCF_BITS
, &hci_sec_filter
.ocf_mask
[ogf
])) &&
861 !capable(CAP_NET_RAW
)) {
866 if (test_bit(HCI_RAW
, &hdev
->flags
) || (ogf
== 0x3f)) {
867 skb_queue_tail(&hdev
->raw_q
, skb
);
868 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
870 skb_queue_tail(&hdev
->cmd_q
, skb
);
871 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
874 if (!capable(CAP_NET_RAW
)) {
879 skb_queue_tail(&hdev
->raw_q
, skb
);
880 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
894 static int hci_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int len
)
896 struct hci_ufilter uf
= { .opcode
= 0 };
897 struct sock
*sk
= sock
->sk
;
898 int err
= 0, opt
= 0;
900 BT_DBG("sk %p, opt %d", sk
, optname
);
904 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
911 if (get_user(opt
, (int __user
*)optval
)) {
917 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_DIR
;
919 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_DIR
;
923 if (get_user(opt
, (int __user
*)optval
)) {
929 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_TSTAMP
;
931 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_TSTAMP
;
936 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
938 uf
.type_mask
= f
->type_mask
;
939 uf
.opcode
= f
->opcode
;
940 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
941 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
944 len
= min_t(unsigned int, len
, sizeof(uf
));
945 if (copy_from_user(&uf
, optval
, len
)) {
950 if (!capable(CAP_NET_RAW
)) {
951 uf
.type_mask
&= hci_sec_filter
.type_mask
;
952 uf
.event_mask
[0] &= *((u32
*) hci_sec_filter
.event_mask
+ 0);
953 uf
.event_mask
[1] &= *((u32
*) hci_sec_filter
.event_mask
+ 1);
957 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
959 f
->type_mask
= uf
.type_mask
;
960 f
->opcode
= uf
.opcode
;
961 *((u32
*) f
->event_mask
+ 0) = uf
.event_mask
[0];
962 *((u32
*) f
->event_mask
+ 1) = uf
.event_mask
[1];
976 static int hci_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
978 struct hci_ufilter uf
;
979 struct sock
*sk
= sock
->sk
;
980 int len
, opt
, err
= 0;
982 BT_DBG("sk %p, opt %d", sk
, optname
);
984 if (get_user(len
, optlen
))
989 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
996 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_DIR
)
1001 if (put_user(opt
, optval
))
1005 case HCI_TIME_STAMP
:
1006 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_TSTAMP
)
1011 if (put_user(opt
, optval
))
1017 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1019 uf
.type_mask
= f
->type_mask
;
1020 uf
.opcode
= f
->opcode
;
1021 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1022 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1025 len
= min_t(unsigned int, len
, sizeof(uf
));
1026 if (copy_to_user(optval
, &uf
, len
))
1040 static const struct proto_ops hci_sock_ops
= {
1041 .family
= PF_BLUETOOTH
,
1042 .owner
= THIS_MODULE
,
1043 .release
= hci_sock_release
,
1044 .bind
= hci_sock_bind
,
1045 .getname
= hci_sock_getname
,
1046 .sendmsg
= hci_sock_sendmsg
,
1047 .recvmsg
= hci_sock_recvmsg
,
1048 .ioctl
= hci_sock_ioctl
,
1049 .poll
= datagram_poll
,
1050 .listen
= sock_no_listen
,
1051 .shutdown
= sock_no_shutdown
,
1052 .setsockopt
= hci_sock_setsockopt
,
1053 .getsockopt
= hci_sock_getsockopt
,
1054 .connect
= sock_no_connect
,
1055 .socketpair
= sock_no_socketpair
,
1056 .accept
= sock_no_accept
,
1057 .mmap
= sock_no_mmap
1060 static struct proto hci_sk_proto
= {
1062 .owner
= THIS_MODULE
,
1063 .obj_size
= sizeof(struct hci_pinfo
)
1066 static int hci_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
1071 BT_DBG("sock %p", sock
);
1073 if (sock
->type
!= SOCK_RAW
)
1074 return -ESOCKTNOSUPPORT
;
1076 sock
->ops
= &hci_sock_ops
;
1078 sk
= sk_alloc(net
, PF_BLUETOOTH
, GFP_ATOMIC
, &hci_sk_proto
);
1082 sock_init_data(sock
, sk
);
1084 sock_reset_flag(sk
, SOCK_ZAPPED
);
1086 sk
->sk_protocol
= protocol
;
1088 sock
->state
= SS_UNCONNECTED
;
1089 sk
->sk_state
= BT_OPEN
;
1091 bt_sock_link(&hci_sk_list
, sk
);
1095 static const struct net_proto_family hci_sock_family_ops
= {
1096 .family
= PF_BLUETOOTH
,
1097 .owner
= THIS_MODULE
,
1098 .create
= hci_sock_create
,
1101 int __init
hci_sock_init(void)
1105 err
= proto_register(&hci_sk_proto
, 0);
1109 err
= bt_sock_register(BTPROTO_HCI
, &hci_sock_family_ops
);
1113 BT_INFO("HCI socket layer initialized");
1118 BT_ERR("HCI socket registration failed");
1119 proto_unregister(&hci_sk_proto
);
1123 void hci_sock_cleanup(void)
1125 if (bt_sock_unregister(BTPROTO_HCI
) < 0)
1126 BT_ERR("HCI socket unregistration failed");
1128 proto_unregister(&hci_sk_proto
);