/*
 * bdf51d1d2c191b6f79af3058c8c23f88d52e022a
 * [deliverable/linux.git] / net / bluetooth / hci_sock.c
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
43 #include <net/sock.h>
44
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50 #include <net/bluetooth/hci_mon.h>
51
/* Number of sockets currently bound to the HCI monitor channel
 * (incremented in hci_sock_bind, decremented in hci_sock_release).
 * Frames are only mirrored to monitor sockets while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
53
54 /* ----- HCI socket interface ----- */
55
56 static inline int hci_test_bit(int nr, void *addr)
57 {
58 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
59 }
60
61 /* Security filter */
62 static struct hci_sec_filter hci_sec_filter = {
63 /* Packet types */
64 0x10,
65 /* Events */
66 { 0x1000d9fe, 0x0000b00c },
67 /* Commands */
68 {
69 { 0x0 },
70 /* OGF_LINK_CTL */
71 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
72 /* OGF_LINK_POLICY */
73 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
74 /* OGF_HOST_CTL */
75 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
76 /* OGF_INFO_PARAM */
77 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
78 /* OGF_STATUS_PARAM */
79 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
80 }
81 };
82
/* Global list of all HCI sockets, protected by its own rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
86
/* Send frame to RAW socket.
 *
 * Delivers a copy of @skb to every bound raw-channel socket on @hdev
 * whose filter accepts it, except the socket the frame came from.
 * The headered copy is built lazily, once, and then cloned per
 * receiver; per-socket failures just drop that socket's copy.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		/* Vendor packets map to type-filter bit 0; all other
		 * packet types are masked to the filter's bit range.
		 */
		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
				&flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* With an opcode filter set, only pass Command
			 * Complete/Status events whose opcode (at byte
			 * offset 3 resp. 4 of the event) matches.
			 */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
158
159 /* Send frame to control socket */
160 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
161 {
162 struct sock *sk;
163 struct hlist_node *node;
164
165 BT_DBG("len %d", skb->len);
166
167 read_lock(&hci_sk_list.lock);
168
169 sk_for_each(sk, node, &hci_sk_list.head) {
170 struct sk_buff *nskb;
171
172 /* Skip the original socket */
173 if (sk == skip_sk)
174 continue;
175
176 if (sk->sk_state != BT_BOUND)
177 continue;
178
179 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
180 continue;
181
182 nskb = skb_clone(skb, GFP_ATOMIC);
183 if (!nskb)
184 continue;
185
186 if (sock_queue_rcv_skb(sk, nskb))
187 kfree_skb(nskb);
188 }
189
190 read_unlock(&hci_sk_list.lock);
191 }
192
/* Send frame to monitor socket.
 *
 * Mirrors @skb to every bound monitor-channel socket, prefixed with a
 * monitor header carrying the opcode (derived from packet type and
 * direction), device index and payload length.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	/* Fast path: no monitor socket is currently bound */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data) to a monitor opcode;
	 * unknown packet types are not mirrored.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		/* The headered copy is built lazily, only once a
		 * matching socket has actually been found.
		 */
		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
267
268 static void send_monitor_event(struct sk_buff *skb)
269 {
270 struct sock *sk;
271 struct hlist_node *node;
272
273 BT_DBG("len %d", skb->len);
274
275 read_lock(&hci_sk_list.lock);
276
277 sk_for_each(sk, node, &hci_sk_list.head) {
278 struct sk_buff *nskb;
279
280 if (sk->sk_state != BT_BOUND)
281 continue;
282
283 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
284 continue;
285
286 nskb = skb_clone(skb, GFP_ATOMIC);
287 if (!nskb)
288 continue;
289
290 if (sock_queue_rcv_skb(sk, nskb))
291 kfree_skb(nskb);
292 }
293
294 read_unlock(&hci_sk_list.lock);
295 }
296
/* Build a monitor frame announcing a device event for @hdev.
 *
 * HCI_DEV_REG yields a NEW_INDEX frame with a device-description
 * payload; HCI_DEV_UNREG yields a header-only DEL_INDEX frame.
 * Returns a freshly allocated skb (caller owns it), or NULL on
 * allocation failure or an unsupported @event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* NEW_INDEX payload describes the new controller */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* DEL_INDEX carries no payload, only the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the common monitor header */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
340
/* Replay a NEW_INDEX event for every currently registered device to a
 * freshly bound monitor socket @sk, so it learns about controllers
 * that existed before it attached.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		/* skb ownership passes to the queue on success */
		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
360
/* Generate internal stack event.
 *
 * Synthesizes an HCI event frame of type HCI_EV_STACK_INTERNAL with
 * @dlen bytes of @data and broadcasts it to raw sockets via
 * hci_send_to_sock().  @hdev may be NULL for device-independent
 * events.  Allocation failure is silently ignored (best effort).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Fake HCI event header wrapping the stack-internal payload */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so direction-aware filters/cmsg treat it as RX */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
388
/* Notify userspace of a device event: mirror it to monitor sockets,
 * broadcast a stack-internal event to raw sockets, and on unregister
 * detach every socket still bound to the device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
432
/* Release an HCI socket: undo the promiscuous counters taken at bind
 * time, unlink from the global socket list, drop the device reference
 * and purge the queues.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Undo the promisc increment and reference from bind */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
463
464 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
465 {
466 bdaddr_t bdaddr;
467 int err;
468
469 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 return -EFAULT;
471
472 hci_dev_lock(hdev);
473
474 err = hci_blacklist_add(hdev, &bdaddr, 0);
475
476 hci_dev_unlock(hdev);
477
478 return err;
479 }
480
481 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
482 {
483 bdaddr_t bdaddr;
484 int err;
485
486 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
487 return -EFAULT;
488
489 hci_dev_lock(hdev);
490
491 err = hci_blacklist_del(hdev, &bdaddr, 0);
492
493 hci_dev_unlock(hdev);
494
495 return err;
496 }
497
/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		/* Toggling raw mode needs CAP_NET_ADMIN and is refused
		 * for devices quirked as permanently raw.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Unknown commands are handed to the driver, if any */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
543
/* Top-level HCI socket ioctl dispatcher.  Device-independent queries
 * need no capability; administrative commands require CAP_NET_ADMIN;
 * everything else falls through to the bound-socket handler.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		/* Remaining commands require a socket bound to a device */
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
604
/* Bind the socket to a channel and, for the raw channel, optionally a
 * device.  Control and monitor channels are device-less and gated by
 * CAP_NET_ADMIN resp. CAP_NET_RAW.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs: copy only what the caller supplied
	 * into a zeroed local structure.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds to all devices; otherwise take a
		 * reference on the requested device and mark it
		 * promiscuous for this socket.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Tell the new monitor about pre-existing devices */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
693
694 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
695 {
696 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
697 struct sock *sk = sock->sk;
698 struct hci_dev *hdev = hci_pi(sk)->hdev;
699
700 BT_DBG("sock %p sk %p", sock, sk);
701
702 if (!hdev)
703 return -EBADFD;
704
705 lock_sock(sk);
706
707 *addr_len = sizeof(*haddr);
708 haddr->hci_family = AF_BLUETOOTH;
709 haddr->hci_dev = hdev->id;
710
711 release_sock(sk);
712 return 0;
713 }
714
/* Attach ancillary data (packet direction and/or receive timestamp)
 * to @msg according to the socket's cmsg mask.
 */
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
749
/* Receive one queued frame.  Frames larger than the caller's buffer
 * are truncated and flagged with MSG_TRUNC.  Returns the number of
 * bytes copied, 0 on a closed socket, or a negative errno.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* HCI sockets carry no source address */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Ancillary data depends on the channel the socket is bound to */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
795
/* Send one frame on an HCI socket.  Raw-channel frames begin with a
 * packet-type byte; HCI commands are checked against the security
 * filter unless the sender has CAP_NET_RAW.  Returns the number of
 * bytes consumed or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte plus a command/event header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		/* Management commands go to the mgmt layer */
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The first byte of the frame is the HCI packet type */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue commands allowed
		 * by the security filter's per-OGF OCF bitmaps.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific (OGF 0x3f) and raw-mode commands
		 * bypass the command queue and go straight out.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Injecting data packets requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
895
/* Set raw-channel socket options: the cmsg flags (direction and
 * timestamp reporting) and the receive filter.  Unprivileged callers
 * have their filter clamped by the security filter masks.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the current filter so a short write
		 * from userspace leaves the remaining fields at their
		 * present values rather than zeroing them.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Without CAP_NET_RAW the filter may not be widened
		 * beyond what the security filter permits.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
977
978 static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
979 {
980 struct hci_ufilter uf;
981 struct sock *sk = sock->sk;
982 int len, opt, err = 0;
983
984 BT_DBG("sk %p, opt %d", sk, optname);
985
986 if (get_user(len, optlen))
987 return -EFAULT;
988
989 lock_sock(sk);
990
991 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
992 err = -EINVAL;
993 goto done;
994 }
995
996 switch (optname) {
997 case HCI_DATA_DIR:
998 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
999 opt = 1;
1000 else
1001 opt = 0;
1002
1003 if (put_user(opt, optval))
1004 err = -EFAULT;
1005 break;
1006
1007 case HCI_TIME_STAMP:
1008 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1009 opt = 1;
1010 else
1011 opt = 0;
1012
1013 if (put_user(opt, optval))
1014 err = -EFAULT;
1015 break;
1016
1017 case HCI_FILTER:
1018 {
1019 struct hci_filter *f = &hci_pi(sk)->filter;
1020
1021 uf.type_mask = f->type_mask;
1022 uf.opcode = f->opcode;
1023 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1024 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1025 }
1026
1027 len = min_t(unsigned int, len, sizeof(uf));
1028 if (copy_to_user(optval, &uf, len))
1029 err = -EFAULT;
1030 break;
1031
1032 default:
1033 err = -ENOPROTOOPT;
1034 break;
1035 }
1036
1037 done:
1038 release_sock(sk);
1039 return err;
1040 }
1041
/* HCI socket operations; connection-oriented operations that do not
 * apply to HCI sockets use the sock_no_*() stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = hci_sock_release,
	.bind = hci_sock_bind,
	.getname = hci_sock_getname,
	.sendmsg = hci_sock_sendmsg,
	.recvmsg = hci_sock_recvmsg,
	.ioctl = hci_sock_ioctl,
	.poll = datagram_poll,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = hci_sock_setsockopt,
	.getsockopt = hci_sock_getsockopt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.mmap = sock_no_mmap
};
1061
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket hci_pinfo state accessed via hci_pi().
 */
static struct proto hci_sk_proto = {
	.name = "HCI",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hci_pinfo)
};
1067
/* Create a new HCI socket (SOCK_RAW only) and link it into the global
 * socket list in the BT_OPEN state.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1096
/* Socket-family hook registered with the Bluetooth core for
 * BTPROTO_HCI socket creation.
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = hci_sock_create,
};
1102
/* Register the HCI protocol and socket family with the networking
 * core.  Returns 0 on success or a negative errno; on partial failure
 * the protocol registration is rolled back.
 */
int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}
1124
1125 void hci_sock_cleanup(void)
1126 {
1127 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1128 BT_ERR("HCI socket unregistration failed");
1129
1130 proto_unregister(&hci_sk_proto);
1131 }
/* This page took 0.053993 seconds and 4 git commands to generate. */