Bluetooth: Permit BT_SECURITY also for L2CAP raw sockets
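This listing shows net/bluetooth/l2cap.c after the change; the relevant hunks are in l2cap_sock_setsockopt() and l2cap_sock_getsockopt(), which now accept the BT_SECURITY option for SOCK_RAW sockets in addition to SOCK_SEQPACKET ones. Below is a minimal userspace sketch of what this permits (illustrative only, not part of the kernel file; it assumes BlueZ-style headers that provide SOL_BLUETOOTH, BT_SECURITY and struct bt_security):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

int main(void)
{
	struct bt_security sec;
	int sk;

	/* Raw L2CAP sockets require CAP_NET_RAW (enforced in l2cap_sock_create()). */
	sk = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	memset(&sec, 0, sizeof(sec));
	sec.level = BT_SECURITY_HIGH;

	/* Before this change the kernel rejected BT_SECURITY with EINVAL unless
	 * the socket was SOCK_SEQPACKET; now the raw socket's security level is
	 * updated as well. */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_SECURITY, &sec, sizeof(sec)) < 0)
		perror("setsockopt(BT_SECURITY)");

	close(sk);
	return 0;
}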
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
53 #define VERSION "2.13"
54
55 static u32 l2cap_feat_mask = 0x0080;
56 static u8 l2cap_fixed_chan[8] = { 0x02, };
57
58 static const struct proto_ops l2cap_sock_ops;
59
60 static struct bt_sock_list l2cap_sk_list = {
61 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
62 };
63
64 static void __l2cap_sock_close(struct sock *sk, int reason);
65 static void l2cap_sock_close(struct sock *sk);
66 static void l2cap_sock_kill(struct sock *sk);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70
71 /* ---- L2CAP timers ---- */
72 static void l2cap_sock_timeout(unsigned long arg)
73 {
74 struct sock *sk = (struct sock *) arg;
75 int reason;
76
77 BT_DBG("sock %p state %d", sk, sk->sk_state);
78
79 bh_lock_sock(sk);
80
81 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
82 reason = ECONNREFUSED;
83 else if (sk->sk_state == BT_CONNECT &&
84 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
85 reason = ECONNREFUSED;
86 else
87 reason = ETIMEDOUT;
88
89 __l2cap_sock_close(sk, reason);
90
91 bh_unlock_sock(sk);
92
93 l2cap_sock_kill(sk);
94 sock_put(sk);
95 }
96
97 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
98 {
99 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
100 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 }
102
103 static void l2cap_sock_clear_timer(struct sock *sk)
104 {
105 BT_DBG("sock %p state %d", sk, sk->sk_state);
106 sk_stop_timer(sk, &sk->sk_timer);
107 }
108
109 /* ---- L2CAP channels ---- */
110 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 {
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->dcid == cid)
115 break;
116 }
117 return s;
118 }
119
120 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 {
122 struct sock *s;
123 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
124 if (l2cap_pi(s)->scid == cid)
125 break;
126 }
127 return s;
128 }
129
130 /* Find channel with given SCID.
131 * Returns locked socket */
132 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 read_lock(&l->lock);
136 s = __l2cap_get_chan_by_scid(l, cid);
137 if (s) bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
140 }
141
142 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 {
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
148 }
149 return s;
150 }
151
152 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s) bh_lock_sock(s);
158 read_unlock(&l->lock);
159 return s;
160 }
161
162 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
163 {
164 u16 cid = 0x0040;
165
166 for (; cid < 0xffff; cid++) {
167 		if (!__l2cap_get_chan_by_scid(l, cid))
168 return cid;
169 }
170
171 return 0;
172 }
173
174 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
175 {
176 sock_hold(sk);
177
178 if (l->head)
179 l2cap_pi(l->head)->prev_c = sk;
180
181 l2cap_pi(sk)->next_c = l->head;
182 l2cap_pi(sk)->prev_c = NULL;
183 l->head = sk;
184 }
185
186 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
187 {
188 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
189
190 write_lock_bh(&l->lock);
191 if (sk == l->head)
192 l->head = next;
193
194 if (next)
195 l2cap_pi(next)->prev_c = prev;
196 if (prev)
197 l2cap_pi(prev)->next_c = next;
198 write_unlock_bh(&l->lock);
199
200 __sock_put(sk);
201 }
202
203 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
204 {
205 struct l2cap_chan_list *l = &conn->chan_list;
206
207 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
208
209 conn->disc_reason = 0x13;
210
211 l2cap_pi(sk)->conn = conn;
212
213 if (sk->sk_type == SOCK_SEQPACKET) {
214 /* Alloc CID for connection-oriented socket */
215 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
216 } else if (sk->sk_type == SOCK_DGRAM) {
217 /* Connectionless socket */
218 l2cap_pi(sk)->scid = 0x0002;
219 l2cap_pi(sk)->dcid = 0x0002;
220 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
221 } else {
222 /* Raw socket can send/recv signalling messages only */
223 l2cap_pi(sk)->scid = 0x0001;
224 l2cap_pi(sk)->dcid = 0x0001;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
226 }
227
228 __l2cap_chan_link(l, sk);
229
230 if (parent)
231 bt_accept_enqueue(parent, sk);
232 }
233
234 /* Delete channel.
235 * Must be called on the locked socket. */
236 static void l2cap_chan_del(struct sock *sk, int err)
237 {
238 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
239 struct sock *parent = bt_sk(sk)->parent;
240
241 l2cap_sock_clear_timer(sk);
242
243 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
244
245 if (conn) {
246 /* Unlink from channel list */
247 l2cap_chan_unlink(&conn->chan_list, sk);
248 l2cap_pi(sk)->conn = NULL;
249 hci_conn_put(conn->hcon);
250 }
251
252 sk->sk_state = BT_CLOSED;
253 sock_set_flag(sk, SOCK_ZAPPED);
254
255 if (err)
256 sk->sk_err = err;
257
258 if (parent) {
259 bt_accept_unlink(sk);
260 parent->sk_data_ready(parent, 0);
261 } else
262 sk->sk_state_change(sk);
263 }
264
265 /* Service level security */
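/* For the SDP PSM (0x0001) the requested authentication never asks for
 * bonding and a BT_SECURITY_LOW setting is downgraded to BT_SECURITY_SDP;
 * for every other PSM, HIGH maps to general bonding with MITM protection,
 * MEDIUM to general bonding, and anything lower to no bonding. */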
266 static inline int l2cap_check_security(struct sock *sk)
267 {
268 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
269 __u8 auth_type;
270
271 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
273 auth_type = HCI_AT_NO_BONDING_MITM;
274 else
275 auth_type = HCI_AT_NO_BONDING;
276
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
279 } else {
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 auth_type = HCI_AT_GENERAL_BONDING_MITM;
283 break;
284 case BT_SECURITY_MEDIUM:
285 auth_type = HCI_AT_GENERAL_BONDING;
286 break;
287 default:
288 auth_type = HCI_AT_NO_BONDING;
289 break;
290 }
291 }
292
293 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
294 auth_type);
295 }
296
297 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
298 {
299 u8 id;
300
301 	/* Get next available identifier.
302 	 *    1 - 128 are used by the kernel.
303 * 129 - 199 are reserved.
304 * 200 - 254 are used by utilities like l2ping, etc.
305 */
306
307 spin_lock_bh(&conn->lock);
308
309 if (++conn->tx_ident > 128)
310 conn->tx_ident = 1;
311
312 id = conn->tx_ident;
313
314 spin_unlock_bh(&conn->lock);
315
316 return id;
317 }
318
319 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
320 {
321 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
322
323 BT_DBG("code 0x%2.2x", code);
324
325 if (!skb)
326 return -ENOMEM;
327
328 return hci_send_acl(conn->hcon, skb, 0);
329 }
330
331 static void l2cap_do_start(struct sock *sk)
332 {
333 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
334
335 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
336 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
337 return;
338
339 if (l2cap_check_security(sk)) {
340 struct l2cap_conn_req req;
341 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
342 req.psm = l2cap_pi(sk)->psm;
343
344 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
345
346 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
347 L2CAP_CONN_REQ, sizeof(req), &req);
348 }
349 } else {
350 struct l2cap_info_req req;
351 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
352
353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
354 conn->info_ident = l2cap_get_ident(conn);
355
356 mod_timer(&conn->info_timer, jiffies +
357 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
358
359 l2cap_send_cmd(conn, conn->info_ident,
360 L2CAP_INFO_REQ, sizeof(req), &req);
361 }
362 }
363
364 /* ---- L2CAP connections ---- */
365 static void l2cap_conn_start(struct l2cap_conn *conn)
366 {
367 struct l2cap_chan_list *l = &conn->chan_list;
368 struct sock *sk;
369
370 BT_DBG("conn %p", conn);
371
372 read_lock(&l->lock);
373
374 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
375 bh_lock_sock(sk);
376
377 if (sk->sk_type != SOCK_SEQPACKET) {
378 bh_unlock_sock(sk);
379 continue;
380 }
381
382 if (sk->sk_state == BT_CONNECT) {
383 if (l2cap_check_security(sk)) {
384 struct l2cap_conn_req req;
385 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
386 req.psm = l2cap_pi(sk)->psm;
387
388 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
389
390 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
391 L2CAP_CONN_REQ, sizeof(req), &req);
392 }
393 } else if (sk->sk_state == BT_CONNECT2) {
394 struct l2cap_conn_rsp rsp;
395 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
396 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
397
398 if (l2cap_check_security(sk)) {
399 if (bt_sk(sk)->defer_setup) {
400 struct sock *parent = bt_sk(sk)->parent;
401 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
402 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
403 parent->sk_data_ready(parent, 0);
404
405 } else {
406 sk->sk_state = BT_CONFIG;
407 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
408 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
409 }
410 } else {
411 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
412 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
413 }
414
415 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
416 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
417 }
418
419 bh_unlock_sock(sk);
420 }
421
422 read_unlock(&l->lock);
423 }
424
425 static void l2cap_conn_ready(struct l2cap_conn *conn)
426 {
427 struct l2cap_chan_list *l = &conn->chan_list;
428 struct sock *sk;
429
430 BT_DBG("conn %p", conn);
431
432 read_lock(&l->lock);
433
434 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
435 bh_lock_sock(sk);
436
437 if (sk->sk_type != SOCK_SEQPACKET) {
438 l2cap_sock_clear_timer(sk);
439 sk->sk_state = BT_CONNECTED;
440 sk->sk_state_change(sk);
441 } else if (sk->sk_state == BT_CONNECT)
442 l2cap_do_start(sk);
443
444 bh_unlock_sock(sk);
445 }
446
447 read_unlock(&l->lock);
448 }
449
450 /* Notify sockets that we cannot guarantee reliability anymore */
451 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
452 {
453 struct l2cap_chan_list *l = &conn->chan_list;
454 struct sock *sk;
455
456 BT_DBG("conn %p", conn);
457
458 read_lock(&l->lock);
459
460 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
461 if (l2cap_pi(sk)->force_reliable)
462 sk->sk_err = err;
463 }
464
465 read_unlock(&l->lock);
466 }
467
468 static void l2cap_info_timeout(unsigned long arg)
469 {
470 struct l2cap_conn *conn = (void *) arg;
471
472 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
473 conn->info_ident = 0;
474
475 l2cap_conn_start(conn);
476 }
477
478 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
479 {
480 struct l2cap_conn *conn = hcon->l2cap_data;
481
482 if (conn || status)
483 return conn;
484
485 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
486 if (!conn)
487 return NULL;
488
489 hcon->l2cap_data = conn;
490 conn->hcon = hcon;
491
492 BT_DBG("hcon %p conn %p", hcon, conn);
493
494 conn->mtu = hcon->hdev->acl_mtu;
495 conn->src = &hcon->hdev->bdaddr;
496 conn->dst = &hcon->dst;
497
498 conn->feat_mask = 0;
499
500 setup_timer(&conn->info_timer, l2cap_info_timeout,
501 (unsigned long) conn);
502
503 spin_lock_init(&conn->lock);
504 rwlock_init(&conn->chan_list.lock);
505
506 conn->disc_reason = 0x13;
507
508 return conn;
509 }
510
511 static void l2cap_conn_del(struct hci_conn *hcon, int err)
512 {
513 struct l2cap_conn *conn = hcon->l2cap_data;
514 struct sock *sk;
515
516 if (!conn)
517 return;
518
519 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
520
521 if (conn->rx_skb)
522 kfree_skb(conn->rx_skb);
523
524 /* Kill channels */
525 while ((sk = conn->chan_list.head)) {
526 bh_lock_sock(sk);
527 l2cap_chan_del(sk, err);
528 bh_unlock_sock(sk);
529 l2cap_sock_kill(sk);
530 }
531
532 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
533 del_timer_sync(&conn->info_timer);
534
535 hcon->l2cap_data = NULL;
536 kfree(conn);
537 }
538
539 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
540 {
541 struct l2cap_chan_list *l = &conn->chan_list;
542 write_lock_bh(&l->lock);
543 __l2cap_chan_add(conn, sk, parent);
544 write_unlock_bh(&l->lock);
545 }
546
547 /* ---- Socket interface ---- */
548 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
549 {
550 struct sock *sk;
551 struct hlist_node *node;
552 sk_for_each(sk, node, &l2cap_sk_list.head)
553 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
554 goto found;
555 sk = NULL;
556 found:
557 return sk;
558 }
559
560 /* Find socket with psm and source bdaddr.
561 * Returns closest match.
562 */
563 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
564 {
565 struct sock *sk = NULL, *sk1 = NULL;
566 struct hlist_node *node;
567
568 sk_for_each(sk, node, &l2cap_sk_list.head) {
569 if (state && sk->sk_state != state)
570 continue;
571
572 if (l2cap_pi(sk)->psm == psm) {
573 /* Exact match. */
574 if (!bacmp(&bt_sk(sk)->src, src))
575 break;
576
577 /* Closest match */
578 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
579 sk1 = sk;
580 }
581 }
582 return node ? sk : sk1;
583 }
584
585 /* Find socket with given address (psm, src).
586 * Returns locked socket */
587 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
588 {
589 struct sock *s;
590 read_lock(&l2cap_sk_list.lock);
591 s = __l2cap_get_sock_by_psm(state, psm, src);
592 if (s) bh_lock_sock(s);
593 read_unlock(&l2cap_sk_list.lock);
594 return s;
595 }
596
597 static void l2cap_sock_destruct(struct sock *sk)
598 {
599 BT_DBG("sk %p", sk);
600
601 skb_queue_purge(&sk->sk_receive_queue);
602 skb_queue_purge(&sk->sk_write_queue);
603 }
604
605 static void l2cap_sock_cleanup_listen(struct sock *parent)
606 {
607 struct sock *sk;
608
609 BT_DBG("parent %p", parent);
610
611 /* Close not yet accepted channels */
612 while ((sk = bt_accept_dequeue(parent, NULL)))
613 l2cap_sock_close(sk);
614
615 parent->sk_state = BT_CLOSED;
616 sock_set_flag(parent, SOCK_ZAPPED);
617 }
618
619 /* Kill socket (only if zapped and orphan)
620 * Must be called on unlocked socket.
621 */
622 static void l2cap_sock_kill(struct sock *sk)
623 {
624 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
625 return;
626
627 BT_DBG("sk %p state %d", sk, sk->sk_state);
628
629 /* Kill poor orphan */
630 bt_sock_unlink(&l2cap_sk_list, sk);
631 sock_set_flag(sk, SOCK_DEAD);
632 sock_put(sk);
633 }
634
635 static void __l2cap_sock_close(struct sock *sk, int reason)
636 {
637 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
638
639 switch (sk->sk_state) {
640 case BT_LISTEN:
641 l2cap_sock_cleanup_listen(sk);
642 break;
643
644 case BT_CONNECTED:
645 case BT_CONFIG:
646 if (sk->sk_type == SOCK_SEQPACKET) {
647 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
648 struct l2cap_disconn_req req;
649
650 sk->sk_state = BT_DISCONN;
651 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
652
653 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
654 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
655 l2cap_send_cmd(conn, l2cap_get_ident(conn),
656 L2CAP_DISCONN_REQ, sizeof(req), &req);
657 } else
658 l2cap_chan_del(sk, reason);
659 break;
660
661 case BT_CONNECT2:
662 if (sk->sk_type == SOCK_SEQPACKET) {
663 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
664 struct l2cap_conn_rsp rsp;
665 __u16 result;
666
667 if (bt_sk(sk)->defer_setup)
668 result = L2CAP_CR_SEC_BLOCK;
669 else
670 result = L2CAP_CR_BAD_PSM;
671
672 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
673 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
674 rsp.result = cpu_to_le16(result);
675 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
676 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
677 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
678 } else
679 l2cap_chan_del(sk, reason);
680 break;
681
682 case BT_CONNECT:
683 case BT_DISCONN:
684 l2cap_chan_del(sk, reason);
685 break;
686
687 default:
688 sock_set_flag(sk, SOCK_ZAPPED);
689 break;
690 }
691 }
692
693 /* Must be called on unlocked socket. */
694 static void l2cap_sock_close(struct sock *sk)
695 {
696 l2cap_sock_clear_timer(sk);
697 lock_sock(sk);
698 __l2cap_sock_close(sk, ECONNRESET);
699 release_sock(sk);
700 l2cap_sock_kill(sk);
701 }
702
703 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
704 {
705 struct l2cap_pinfo *pi = l2cap_pi(sk);
706
707 BT_DBG("sk %p", sk);
708
709 if (parent) {
710 sk->sk_type = parent->sk_type;
711 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
712
713 pi->imtu = l2cap_pi(parent)->imtu;
714 pi->omtu = l2cap_pi(parent)->omtu;
715 pi->sec_level = l2cap_pi(parent)->sec_level;
716 pi->role_switch = l2cap_pi(parent)->role_switch;
717 pi->force_reliable = l2cap_pi(parent)->force_reliable;
718 } else {
719 pi->imtu = L2CAP_DEFAULT_MTU;
720 pi->omtu = 0;
721 pi->sec_level = BT_SECURITY_LOW;
722 pi->role_switch = 0;
723 pi->force_reliable = 0;
724 }
725
726 /* Default config options */
727 pi->conf_len = 0;
728 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
729 }
730
731 static struct proto l2cap_proto = {
732 .name = "L2CAP",
733 .owner = THIS_MODULE,
734 .obj_size = sizeof(struct l2cap_pinfo)
735 };
736
737 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
738 {
739 struct sock *sk;
740
741 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
742 if (!sk)
743 return NULL;
744
745 sock_init_data(sock, sk);
746 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
747
748 sk->sk_destruct = l2cap_sock_destruct;
749 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
750
751 sock_reset_flag(sk, SOCK_ZAPPED);
752
753 sk->sk_protocol = proto;
754 sk->sk_state = BT_OPEN;
755
756 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
757
758 bt_sock_link(&l2cap_sk_list, sk);
759 return sk;
760 }
761
762 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
763 {
764 struct sock *sk;
765
766 BT_DBG("sock %p", sock);
767
768 sock->state = SS_UNCONNECTED;
769
770 if (sock->type != SOCK_SEQPACKET &&
771 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
772 return -ESOCKTNOSUPPORT;
773
774 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
775 return -EPERM;
776
777 sock->ops = &l2cap_sock_ops;
778
779 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
780 if (!sk)
781 return -ENOMEM;
782
783 l2cap_sock_init(sk, NULL);
784 return 0;
785 }
786
787 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
788 {
789 struct sock *sk = sock->sk;
790 struct sockaddr_l2 la;
791 int len, err = 0;
792
793 BT_DBG("sk %p", sk);
794
795 if (!addr || addr->sa_family != AF_BLUETOOTH)
796 return -EINVAL;
797
798 memset(&la, 0, sizeof(la));
799 len = min_t(unsigned int, sizeof(la), alen);
800 memcpy(&la, addr, len);
801
802 if (la.l2_cid)
803 return -EINVAL;
804
805 lock_sock(sk);
806
807 if (sk->sk_state != BT_OPEN) {
808 err = -EBADFD;
809 goto done;
810 }
811
812 if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
813 !capable(CAP_NET_BIND_SERVICE)) {
814 err = -EACCES;
815 goto done;
816 }
817
818 write_lock_bh(&l2cap_sk_list.lock);
819
820 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
821 err = -EADDRINUSE;
822 } else {
823 /* Save source address */
824 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
825 l2cap_pi(sk)->psm = la.l2_psm;
826 l2cap_pi(sk)->sport = la.l2_psm;
827 sk->sk_state = BT_BOUND;
828
829 if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
830 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
831 }
832
833 write_unlock_bh(&l2cap_sk_list.lock);
834
835 done:
836 release_sock(sk);
837 return err;
838 }
839
840 static int l2cap_do_connect(struct sock *sk)
841 {
842 bdaddr_t *src = &bt_sk(sk)->src;
843 bdaddr_t *dst = &bt_sk(sk)->dst;
844 struct l2cap_conn *conn;
845 struct hci_conn *hcon;
846 struct hci_dev *hdev;
847 __u8 auth_type;
848 int err = 0;
849
850 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
851 l2cap_pi(sk)->psm);
852
853 if (!(hdev = hci_get_route(dst, src)))
854 return -EHOSTUNREACH;
855
856 hci_dev_lock_bh(hdev);
857
858 err = -ENOMEM;
859
860 if (sk->sk_type == SOCK_RAW) {
861 switch (l2cap_pi(sk)->sec_level) {
862 case BT_SECURITY_HIGH:
863 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
864 break;
865 case BT_SECURITY_MEDIUM:
866 auth_type = HCI_AT_DEDICATED_BONDING;
867 break;
868 default:
869 auth_type = HCI_AT_NO_BONDING;
870 break;
871 }
872 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
873 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
874 auth_type = HCI_AT_NO_BONDING_MITM;
875 else
876 auth_type = HCI_AT_NO_BONDING;
877
878 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
879 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
880 } else {
881 switch (l2cap_pi(sk)->sec_level) {
882 case BT_SECURITY_HIGH:
883 auth_type = HCI_AT_GENERAL_BONDING_MITM;
884 break;
885 case BT_SECURITY_MEDIUM:
886 auth_type = HCI_AT_GENERAL_BONDING;
887 break;
888 default:
889 auth_type = HCI_AT_NO_BONDING;
890 break;
891 }
892 }
893
894 hcon = hci_connect(hdev, ACL_LINK, dst,
895 l2cap_pi(sk)->sec_level, auth_type);
896 if (!hcon)
897 goto done;
898
899 conn = l2cap_conn_add(hcon, 0);
900 if (!conn) {
901 hci_conn_put(hcon);
902 goto done;
903 }
904
905 err = 0;
906
907 /* Update source addr of the socket */
908 bacpy(src, conn->src);
909
910 l2cap_chan_add(conn, sk, NULL);
911
912 sk->sk_state = BT_CONNECT;
913 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
914
915 if (hcon->state == BT_CONNECTED) {
916 if (sk->sk_type != SOCK_SEQPACKET) {
917 l2cap_sock_clear_timer(sk);
918 sk->sk_state = BT_CONNECTED;
919 } else
920 l2cap_do_start(sk);
921 }
922
923 done:
924 hci_dev_unlock_bh(hdev);
925 hci_dev_put(hdev);
926 return err;
927 }
928
929 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
930 {
931 struct sock *sk = sock->sk;
932 struct sockaddr_l2 la;
933 int len, err = 0;
934
935 BT_DBG("sk %p", sk);
936
937 if (!addr || addr->sa_family != AF_BLUETOOTH)
938 return -EINVAL;
939
940 memset(&la, 0, sizeof(la));
941 len = min_t(unsigned int, sizeof(la), alen);
942 memcpy(&la, addr, len);
943
944 if (la.l2_cid)
945 return -EINVAL;
946
947 lock_sock(sk);
948
949 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
950 err = -EINVAL;
951 goto done;
952 }
953
954 	switch (sk->sk_state) {
955 case BT_CONNECT:
956 case BT_CONNECT2:
957 case BT_CONFIG:
958 /* Already connecting */
959 goto wait;
960
961 case BT_CONNECTED:
962 /* Already connected */
963 goto done;
964
965 case BT_OPEN:
966 case BT_BOUND:
967 /* Can connect */
968 break;
969
970 default:
971 err = -EBADFD;
972 goto done;
973 }
974
975 /* Set destination address and psm */
976 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
977 l2cap_pi(sk)->psm = la.l2_psm;
978
979 if ((err = l2cap_do_connect(sk)))
980 goto done;
981
982 wait:
983 err = bt_sock_wait_state(sk, BT_CONNECTED,
984 sock_sndtimeo(sk, flags & O_NONBLOCK));
985 done:
986 release_sock(sk);
987 return err;
988 }
989
990 static int l2cap_sock_listen(struct socket *sock, int backlog)
991 {
992 struct sock *sk = sock->sk;
993 int err = 0;
994
995 BT_DBG("sk %p backlog %d", sk, backlog);
996
997 lock_sock(sk);
998
999 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1000 err = -EBADFD;
1001 goto done;
1002 }
1003
1004 if (!l2cap_pi(sk)->psm) {
1005 bdaddr_t *src = &bt_sk(sk)->src;
1006 u16 psm;
1007
1008 err = -EINVAL;
1009
1010 write_lock_bh(&l2cap_sk_list.lock);
1011
1012 for (psm = 0x1001; psm < 0x1100; psm += 2)
1013 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
1014 l2cap_pi(sk)->psm = htobs(psm);
1015 l2cap_pi(sk)->sport = htobs(psm);
1016 err = 0;
1017 break;
1018 }
1019
1020 write_unlock_bh(&l2cap_sk_list.lock);
1021
1022 if (err < 0)
1023 goto done;
1024 }
1025
1026 sk->sk_max_ack_backlog = backlog;
1027 sk->sk_ack_backlog = 0;
1028 sk->sk_state = BT_LISTEN;
1029
1030 done:
1031 release_sock(sk);
1032 return err;
1033 }
1034
1035 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1036 {
1037 DECLARE_WAITQUEUE(wait, current);
1038 struct sock *sk = sock->sk, *nsk;
1039 long timeo;
1040 int err = 0;
1041
1042 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1043
1044 if (sk->sk_state != BT_LISTEN) {
1045 err = -EBADFD;
1046 goto done;
1047 }
1048
1049 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1050
1051 BT_DBG("sk %p timeo %ld", sk, timeo);
1052
1053 /* Wait for an incoming connection. (wake-one). */
1054 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1055 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1056 set_current_state(TASK_INTERRUPTIBLE);
1057 if (!timeo) {
1058 err = -EAGAIN;
1059 break;
1060 }
1061
1062 release_sock(sk);
1063 timeo = schedule_timeout(timeo);
1064 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1065
1066 if (sk->sk_state != BT_LISTEN) {
1067 err = -EBADFD;
1068 break;
1069 }
1070
1071 if (signal_pending(current)) {
1072 err = sock_intr_errno(timeo);
1073 break;
1074 }
1075 }
1076 set_current_state(TASK_RUNNING);
1077 remove_wait_queue(sk->sk_sleep, &wait);
1078
1079 if (err)
1080 goto done;
1081
1082 newsock->state = SS_CONNECTED;
1083
1084 BT_DBG("new socket %p", nsk);
1085
1086 done:
1087 release_sock(sk);
1088 return err;
1089 }
1090
1091 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1092 {
1093 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1094 struct sock *sk = sock->sk;
1095
1096 BT_DBG("sock %p, sk %p", sock, sk);
1097
1098 addr->sa_family = AF_BLUETOOTH;
1099 *len = sizeof(struct sockaddr_l2);
1100
1101 if (peer) {
1102 la->l2_psm = l2cap_pi(sk)->psm;
1103 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1104 la->l2_cid = htobs(l2cap_pi(sk)->dcid);
1105 } else {
1106 la->l2_psm = l2cap_pi(sk)->sport;
1107 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1108 la->l2_cid = htobs(l2cap_pi(sk)->scid);
1109 }
1110
1111 return 0;
1112 }
1113
1114 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1115 {
1116 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1117 struct sk_buff *skb, **frag;
1118 	int err, hlen, count, sent = 0;
1119 struct l2cap_hdr *lh;
1120
1121 BT_DBG("sk %p len %d", sk, len);
1122
1123 /* First fragment (with L2CAP header) */
1124 if (sk->sk_type == SOCK_DGRAM)
1125 hlen = L2CAP_HDR_SIZE + 2;
1126 else
1127 hlen = L2CAP_HDR_SIZE;
1128
1129 count = min_t(unsigned int, (conn->mtu - hlen), len);
1130
1131 skb = bt_skb_send_alloc(sk, hlen + count,
1132 msg->msg_flags & MSG_DONTWAIT, &err);
1133 if (!skb)
1134 return err;
1135
1136 /* Create L2CAP header */
1137 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1138 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1139 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1140
1141 if (sk->sk_type == SOCK_DGRAM)
1142 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1143
1144 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1145 err = -EFAULT;
1146 goto fail;
1147 }
1148
1149 sent += count;
1150 len -= count;
1151
1152 /* Continuation fragments (no L2CAP header) */
1153 frag = &skb_shinfo(skb)->frag_list;
1154 while (len) {
1155 count = min_t(unsigned int, conn->mtu, len);
1156
1157 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1158 if (!*frag)
1159 goto fail;
1160
1161 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1162 err = -EFAULT;
1163 goto fail;
1164 }
1165
1166 sent += count;
1167 len -= count;
1168
1169 frag = &(*frag)->next;
1170 }
1171
1172 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1173 goto fail;
1174
1175 return sent;
1176
1177 fail:
1178 kfree_skb(skb);
1179 return err;
1180 }
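/* Illustrative example: with conn->mtu = 672, a 1500-byte SOCK_SEQPACKET
 * payload becomes a first skb carrying the 4-byte L2CAP header plus 668
 * bytes of data, followed by continuation fragments of 672 and 160 bytes
 * chained on frag_list and handed to hci_send_acl() as a single frame. */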
1181
1182 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1183 {
1184 struct sock *sk = sock->sk;
1185 int err = 0;
1186
1187 BT_DBG("sock %p, sk %p", sock, sk);
1188
1189 err = sock_error(sk);
1190 if (err)
1191 return err;
1192
1193 if (msg->msg_flags & MSG_OOB)
1194 return -EOPNOTSUPP;
1195
1196 /* Check outgoing MTU */
1197 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1198 return -EINVAL;
1199
1200 lock_sock(sk);
1201
1202 if (sk->sk_state == BT_CONNECTED)
1203 err = l2cap_do_send(sk, msg, len);
1204 else
1205 err = -ENOTCONN;
1206
1207 release_sock(sk);
1208 return err;
1209 }
1210
1211 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1212 {
1213 struct sock *sk = sock->sk;
1214
1215 lock_sock(sk);
1216
1217 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1218 struct l2cap_conn_rsp rsp;
1219
1220 sk->sk_state = BT_CONFIG;
1221
1222 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1223 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1224 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1225 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1226 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1227 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1228
1229 release_sock(sk);
1230 return 0;
1231 }
1232
1233 release_sock(sk);
1234
1235 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1236 }
1237
1238 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1239 {
1240 struct sock *sk = sock->sk;
1241 struct l2cap_options opts;
1242 int len, err = 0;
1243 u32 opt;
1244
1245 BT_DBG("sk %p", sk);
1246
1247 lock_sock(sk);
1248
1249 switch (optname) {
1250 case L2CAP_OPTIONS:
1251 opts.imtu = l2cap_pi(sk)->imtu;
1252 opts.omtu = l2cap_pi(sk)->omtu;
1253 opts.flush_to = l2cap_pi(sk)->flush_to;
1254 opts.mode = L2CAP_MODE_BASIC;
1255
1256 len = min_t(unsigned int, sizeof(opts), optlen);
1257 if (copy_from_user((char *) &opts, optval, len)) {
1258 err = -EFAULT;
1259 break;
1260 }
1261
1262 l2cap_pi(sk)->imtu = opts.imtu;
1263 l2cap_pi(sk)->omtu = opts.omtu;
1264 break;
1265
1266 case L2CAP_LM:
1267 if (get_user(opt, (u32 __user *) optval)) {
1268 err = -EFAULT;
1269 break;
1270 }
1271
1272 if (opt & L2CAP_LM_AUTH)
1273 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1274 if (opt & L2CAP_LM_ENCRYPT)
1275 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1276 if (opt & L2CAP_LM_SECURE)
1277 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1278
1279 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1280 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1281 break;
1282
1283 default:
1284 err = -ENOPROTOOPT;
1285 break;
1286 }
1287
1288 release_sock(sk);
1289 return err;
1290 }
1291
1292 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1293 {
1294 struct sock *sk = sock->sk;
1295 struct bt_security sec;
1296 int len, err = 0;
1297 u32 opt;
1298
1299 BT_DBG("sk %p", sk);
1300
1301 if (level == SOL_L2CAP)
1302 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1303
1304 if (level != SOL_BLUETOOTH)
1305 return -ENOPROTOOPT;
1306
1307 lock_sock(sk);
1308
1309 switch (optname) {
1310 case BT_SECURITY:
1311 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1312 err = -EINVAL;
1313 break;
1314 }
1315
1316 sec.level = BT_SECURITY_LOW;
1317
1318 len = min_t(unsigned int, sizeof(sec), optlen);
1319 if (copy_from_user((char *) &sec, optval, len)) {
1320 err = -EFAULT;
1321 break;
1322 }
1323
1324 if (sec.level < BT_SECURITY_LOW ||
1325 sec.level > BT_SECURITY_HIGH) {
1326 err = -EINVAL;
1327 break;
1328 }
1329
1330 l2cap_pi(sk)->sec_level = sec.level;
1331 break;
1332
1333 case BT_DEFER_SETUP:
1334 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1335 err = -EINVAL;
1336 break;
1337 }
1338
1339 if (get_user(opt, (u32 __user *) optval)) {
1340 err = -EFAULT;
1341 break;
1342 }
1343
1344 bt_sk(sk)->defer_setup = opt;
1345 break;
1346
1347 default:
1348 err = -ENOPROTOOPT;
1349 break;
1350 }
1351
1352 release_sock(sk);
1353 return err;
1354 }
1355
1356 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1357 {
1358 struct sock *sk = sock->sk;
1359 struct l2cap_options opts;
1360 struct l2cap_conninfo cinfo;
1361 int len, err = 0;
1362 u32 opt;
1363
1364 BT_DBG("sk %p", sk);
1365
1366 if (get_user(len, optlen))
1367 return -EFAULT;
1368
1369 lock_sock(sk);
1370
1371 switch (optname) {
1372 case L2CAP_OPTIONS:
1373 opts.imtu = l2cap_pi(sk)->imtu;
1374 opts.omtu = l2cap_pi(sk)->omtu;
1375 opts.flush_to = l2cap_pi(sk)->flush_to;
1376 opts.mode = L2CAP_MODE_BASIC;
1377
1378 len = min_t(unsigned int, len, sizeof(opts));
1379 if (copy_to_user(optval, (char *) &opts, len))
1380 err = -EFAULT;
1381
1382 break;
1383
1384 case L2CAP_LM:
1385 switch (l2cap_pi(sk)->sec_level) {
1386 case BT_SECURITY_LOW:
1387 opt = L2CAP_LM_AUTH;
1388 break;
1389 case BT_SECURITY_MEDIUM:
1390 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1391 break;
1392 case BT_SECURITY_HIGH:
1393 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1394 L2CAP_LM_SECURE;
1395 break;
1396 default:
1397 opt = 0;
1398 break;
1399 }
1400
1401 if (l2cap_pi(sk)->role_switch)
1402 opt |= L2CAP_LM_MASTER;
1403
1404 if (l2cap_pi(sk)->force_reliable)
1405 opt |= L2CAP_LM_RELIABLE;
1406
1407 if (put_user(opt, (u32 __user *) optval))
1408 err = -EFAULT;
1409 break;
1410
1411 case L2CAP_CONNINFO:
1412 if (sk->sk_state != BT_CONNECTED &&
1413 !(sk->sk_state == BT_CONNECT2 &&
1414 bt_sk(sk)->defer_setup)) {
1415 err = -ENOTCONN;
1416 break;
1417 }
1418
1419 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1420 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1421
1422 len = min_t(unsigned int, len, sizeof(cinfo));
1423 if (copy_to_user(optval, (char *) &cinfo, len))
1424 err = -EFAULT;
1425
1426 break;
1427
1428 default:
1429 err = -ENOPROTOOPT;
1430 break;
1431 }
1432
1433 release_sock(sk);
1434 return err;
1435 }
1436
1437 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1438 {
1439 struct sock *sk = sock->sk;
1440 struct bt_security sec;
1441 int len, err = 0;
1442
1443 BT_DBG("sk %p", sk);
1444
1445 if (level == SOL_L2CAP)
1446 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1447
1448 if (level != SOL_BLUETOOTH)
1449 return -ENOPROTOOPT;
1450
1451 if (get_user(len, optlen))
1452 return -EFAULT;
1453
1454 lock_sock(sk);
1455
1456 switch (optname) {
1457 case BT_SECURITY:
1458 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1459 err = -EINVAL;
1460 break;
1461 }
1462
1463 sec.level = l2cap_pi(sk)->sec_level;
1464
1465 len = min_t(unsigned int, len, sizeof(sec));
1466 if (copy_to_user(optval, (char *) &sec, len))
1467 err = -EFAULT;
1468
1469 break;
1470
1471 case BT_DEFER_SETUP:
1472 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1473 err = -EINVAL;
1474 break;
1475 }
1476
1477 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1478 err = -EFAULT;
1479
1480 break;
1481
1482 default:
1483 err = -ENOPROTOOPT;
1484 break;
1485 }
1486
1487 release_sock(sk);
1488 return err;
1489 }
1490
1491 static int l2cap_sock_shutdown(struct socket *sock, int how)
1492 {
1493 struct sock *sk = sock->sk;
1494 int err = 0;
1495
1496 BT_DBG("sock %p, sk %p", sock, sk);
1497
1498 if (!sk)
1499 return 0;
1500
1501 lock_sock(sk);
1502 if (!sk->sk_shutdown) {
1503 sk->sk_shutdown = SHUTDOWN_MASK;
1504 l2cap_sock_clear_timer(sk);
1505 __l2cap_sock_close(sk, 0);
1506
1507 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1508 err = bt_sock_wait_state(sk, BT_CLOSED,
1509 sk->sk_lingertime);
1510 }
1511 release_sock(sk);
1512 return err;
1513 }
1514
1515 static int l2cap_sock_release(struct socket *sock)
1516 {
1517 struct sock *sk = sock->sk;
1518 int err;
1519
1520 BT_DBG("sock %p, sk %p", sock, sk);
1521
1522 if (!sk)
1523 return 0;
1524
1525 err = l2cap_sock_shutdown(sock, 2);
1526
1527 sock_orphan(sk);
1528 l2cap_sock_kill(sk);
1529 return err;
1530 }
1531
1532 static void l2cap_chan_ready(struct sock *sk)
1533 {
1534 struct sock *parent = bt_sk(sk)->parent;
1535
1536 BT_DBG("sk %p, parent %p", sk, parent);
1537
1538 l2cap_pi(sk)->conf_state = 0;
1539 l2cap_sock_clear_timer(sk);
1540
1541 if (!parent) {
1542 /* Outgoing channel.
1543 * Wake up socket sleeping on connect.
1544 */
1545 sk->sk_state = BT_CONNECTED;
1546 sk->sk_state_change(sk);
1547 } else {
1548 /* Incoming channel.
1549 * Wake up socket sleeping on accept.
1550 */
1551 parent->sk_data_ready(parent, 0);
1552 }
1553 }
1554
1555 /* Copy frame to all raw sockets on that connection */
1556 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1557 {
1558 struct l2cap_chan_list *l = &conn->chan_list;
1559 struct sk_buff *nskb;
1560 	struct sock *sk;
1561
1562 BT_DBG("conn %p", conn);
1563
1564 read_lock(&l->lock);
1565 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1566 if (sk->sk_type != SOCK_RAW)
1567 continue;
1568
1569 /* Don't send frame to the socket it came from */
1570 if (skb->sk == sk)
1571 continue;
1572
1573 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1574 continue;
1575
1576 if (sock_queue_rcv_skb(sk, nskb))
1577 kfree_skb(nskb);
1578 }
1579 read_unlock(&l->lock);
1580 }
1581
1582 /* ---- L2CAP signalling commands ---- */
1583 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1584 u8 code, u8 ident, u16 dlen, void *data)
1585 {
1586 struct sk_buff *skb, **frag;
1587 struct l2cap_cmd_hdr *cmd;
1588 struct l2cap_hdr *lh;
1589 int len, count;
1590
1591 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1592
1593 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1594 count = min_t(unsigned int, conn->mtu, len);
1595
1596 skb = bt_skb_alloc(count, GFP_ATOMIC);
1597 if (!skb)
1598 return NULL;
1599
1600 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1601 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1602 lh->cid = cpu_to_le16(0x0001);
1603
1604 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1605 cmd->code = code;
1606 cmd->ident = ident;
1607 cmd->len = cpu_to_le16(dlen);
1608
1609 if (dlen) {
1610 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1611 memcpy(skb_put(skb, count), data, count);
1612 data += count;
1613 }
1614
1615 len -= skb->len;
1616
1617 /* Continuation fragments (no L2CAP header) */
1618 frag = &skb_shinfo(skb)->frag_list;
1619 while (len) {
1620 count = min_t(unsigned int, conn->mtu, len);
1621
1622 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1623 if (!*frag)
1624 goto fail;
1625
1626 memcpy(skb_put(*frag, count), data, count);
1627
1628 len -= count;
1629 data += count;
1630
1631 frag = &(*frag)->next;
1632 }
1633
1634 return skb;
1635
1636 fail:
1637 kfree_skb(skb);
1638 return NULL;
1639 }
1640
1641 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1642 {
1643 struct l2cap_conf_opt *opt = *ptr;
1644 int len;
1645
1646 len = L2CAP_CONF_OPT_SIZE + opt->len;
1647 *ptr += len;
1648
1649 *type = opt->type;
1650 *olen = opt->len;
1651
1652 switch (opt->len) {
1653 case 1:
1654 *val = *((u8 *) opt->val);
1655 break;
1656
1657 case 2:
1658 *val = __le16_to_cpu(*((__le16 *) opt->val));
1659 break;
1660
1661 case 4:
1662 *val = __le32_to_cpu(*((__le32 *) opt->val));
1663 break;
1664
1665 default:
1666 *val = (unsigned long) opt->val;
1667 break;
1668 }
1669
1670 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1671 return len;
1672 }
1673
1674 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1675 {
1676 struct l2cap_conf_opt *opt = *ptr;
1677
1678 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1679
1680 opt->type = type;
1681 opt->len = len;
1682
1683 switch (len) {
1684 case 1:
1685 *((u8 *) opt->val) = val;
1686 break;
1687
1688 case 2:
1689 *((__le16 *) opt->val) = cpu_to_le16(val);
1690 break;
1691
1692 case 4:
1693 *((__le32 *) opt->val) = cpu_to_le32(val);
1694 break;
1695
1696 default:
1697 memcpy(opt->val, (void *) val, len);
1698 break;
1699 }
1700
1701 *ptr += L2CAP_CONF_OPT_SIZE + len;
1702 }
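/* Illustrative example: an MTU option emitted by l2cap_add_conf_opt() with
 * type L2CAP_CONF_MTU (0x01), len 2 and val 0x02a0 (672) occupies four
 * octets on the wire: 01 02 a0 02 (type, length, little-endian value). */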
1703
1704 static int l2cap_build_conf_req(struct sock *sk, void *data)
1705 {
1706 struct l2cap_pinfo *pi = l2cap_pi(sk);
1707 struct l2cap_conf_req *req = data;
1708 void *ptr = req->data;
1709
1710 BT_DBG("sk %p", sk);
1711
1712 if (pi->imtu != L2CAP_DEFAULT_MTU)
1713 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1714
1715 /* FIXME: Need actual value of the flush timeout */
1716 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1717 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1718
1719 req->dcid = cpu_to_le16(pi->dcid);
1720 req->flags = cpu_to_le16(0);
1721
1722 return ptr - data;
1723 }
1724
1725 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1726 {
1727 struct l2cap_pinfo *pi = l2cap_pi(sk);
1728 struct l2cap_conf_rsp *rsp = data;
1729 void *ptr = rsp->data;
1730 void *req = pi->conf_req;
1731 int len = pi->conf_len;
1732 int type, hint, olen;
1733 unsigned long val;
1734 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1735 u16 mtu = L2CAP_DEFAULT_MTU;
1736 u16 result = L2CAP_CONF_SUCCESS;
1737
1738 BT_DBG("sk %p", sk);
1739
1740 while (len >= L2CAP_CONF_OPT_SIZE) {
1741 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1742
1743 hint = type & 0x80;
1744 type &= 0x7f;
1745
1746 switch (type) {
1747 case L2CAP_CONF_MTU:
1748 mtu = val;
1749 break;
1750
1751 case L2CAP_CONF_FLUSH_TO:
1752 pi->flush_to = val;
1753 break;
1754
1755 case L2CAP_CONF_QOS:
1756 break;
1757
1758 case L2CAP_CONF_RFC:
1759 if (olen == sizeof(rfc))
1760 memcpy(&rfc, (void *) val, olen);
1761 break;
1762
1763 default:
1764 if (hint)
1765 break;
1766
1767 result = L2CAP_CONF_UNKNOWN;
1768 *((u8 *) ptr++) = type;
1769 break;
1770 }
1771 }
1772
1773 if (result == L2CAP_CONF_SUCCESS) {
1774 /* Configure output options and let the other side know
1775 * which ones we don't like. */
1776
1777 if (rfc.mode == L2CAP_MODE_BASIC) {
1778 if (mtu < pi->omtu)
1779 result = L2CAP_CONF_UNACCEPT;
1780 else {
1781 pi->omtu = mtu;
1782 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1783 }
1784
1785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1786 } else {
1787 result = L2CAP_CONF_UNACCEPT;
1788
1789 memset(&rfc, 0, sizeof(rfc));
1790 rfc.mode = L2CAP_MODE_BASIC;
1791
1792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1793 sizeof(rfc), (unsigned long) &rfc);
1794 }
1795 }
1796
1797 rsp->scid = cpu_to_le16(pi->dcid);
1798 rsp->result = cpu_to_le16(result);
1799 rsp->flags = cpu_to_le16(0x0000);
1800
1801 return ptr - data;
1802 }
1803
1804 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1805 {
1806 struct l2cap_conf_rsp *rsp = data;
1807 void *ptr = rsp->data;
1808
1809 BT_DBG("sk %p", sk);
1810
1811 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1812 rsp->result = cpu_to_le16(result);
1813 rsp->flags = cpu_to_le16(flags);
1814
1815 return ptr - data;
1816 }
1817
1818 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1819 {
1820 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1821
1822 if (rej->reason != 0x0000)
1823 return 0;
1824
1825 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1826 cmd->ident == conn->info_ident) {
1827 del_timer(&conn->info_timer);
1828
1829 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1830 conn->info_ident = 0;
1831
1832 l2cap_conn_start(conn);
1833 }
1834
1835 return 0;
1836 }
1837
1838 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1839 {
1840 struct l2cap_chan_list *list = &conn->chan_list;
1841 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1842 struct l2cap_conn_rsp rsp;
1843 struct sock *sk, *parent;
1844 int result, status = L2CAP_CS_NO_INFO;
1845
1846 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1847 __le16 psm = req->psm;
1848
1849 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1850
1851 /* Check if we have socket listening on psm */
1852 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1853 if (!parent) {
1854 result = L2CAP_CR_BAD_PSM;
1855 goto sendresp;
1856 }
1857
1858 /* Check if the ACL is secure enough (if not SDP) */
1859 if (psm != cpu_to_le16(0x0001) &&
1860 !hci_conn_check_link_mode(conn->hcon)) {
1861 conn->disc_reason = 0x05;
1862 result = L2CAP_CR_SEC_BLOCK;
1863 goto response;
1864 }
1865
1866 result = L2CAP_CR_NO_MEM;
1867
1868 /* Check for backlog size */
1869 if (sk_acceptq_is_full(parent)) {
1870 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1871 goto response;
1872 }
1873
1874 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1875 if (!sk)
1876 goto response;
1877
1878 write_lock_bh(&list->lock);
1879
1880 /* Check if we already have channel with that dcid */
1881 if (__l2cap_get_chan_by_dcid(list, scid)) {
1882 write_unlock_bh(&list->lock);
1883 sock_set_flag(sk, SOCK_ZAPPED);
1884 l2cap_sock_kill(sk);
1885 goto response;
1886 }
1887
1888 hci_conn_hold(conn->hcon);
1889
1890 l2cap_sock_init(sk, parent);
1891 bacpy(&bt_sk(sk)->src, conn->src);
1892 bacpy(&bt_sk(sk)->dst, conn->dst);
1893 l2cap_pi(sk)->psm = psm;
1894 l2cap_pi(sk)->dcid = scid;
1895
1896 __l2cap_chan_add(conn, sk, parent);
1897 dcid = l2cap_pi(sk)->scid;
1898
1899 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1900
1901 l2cap_pi(sk)->ident = cmd->ident;
1902
1903 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1904 if (l2cap_check_security(sk)) {
1905 if (bt_sk(sk)->defer_setup) {
1906 sk->sk_state = BT_CONNECT2;
1907 result = L2CAP_CR_PEND;
1908 status = L2CAP_CS_AUTHOR_PEND;
1909 parent->sk_data_ready(parent, 0);
1910 } else {
1911 sk->sk_state = BT_CONFIG;
1912 result = L2CAP_CR_SUCCESS;
1913 status = L2CAP_CS_NO_INFO;
1914 }
1915 } else {
1916 sk->sk_state = BT_CONNECT2;
1917 result = L2CAP_CR_PEND;
1918 status = L2CAP_CS_AUTHEN_PEND;
1919 }
1920 } else {
1921 sk->sk_state = BT_CONNECT2;
1922 result = L2CAP_CR_PEND;
1923 status = L2CAP_CS_NO_INFO;
1924 }
1925
1926 write_unlock_bh(&list->lock);
1927
1928 response:
1929 bh_unlock_sock(parent);
1930
1931 sendresp:
1932 rsp.scid = cpu_to_le16(scid);
1933 rsp.dcid = cpu_to_le16(dcid);
1934 rsp.result = cpu_to_le16(result);
1935 rsp.status = cpu_to_le16(status);
1936 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1937
1938 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1939 struct l2cap_info_req info;
1940 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1941
1942 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1943 conn->info_ident = l2cap_get_ident(conn);
1944
1945 mod_timer(&conn->info_timer, jiffies +
1946 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1947
1948 l2cap_send_cmd(conn, conn->info_ident,
1949 L2CAP_INFO_REQ, sizeof(info), &info);
1950 }
1951
1952 return 0;
1953 }
1954
1955 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1956 {
1957 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1958 u16 scid, dcid, result, status;
1959 struct sock *sk;
1960 u8 req[128];
1961
1962 scid = __le16_to_cpu(rsp->scid);
1963 dcid = __le16_to_cpu(rsp->dcid);
1964 result = __le16_to_cpu(rsp->result);
1965 status = __le16_to_cpu(rsp->status);
1966
1967 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1968
1969 if (scid) {
1970 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1971 return 0;
1972 } else {
1973 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1974 return 0;
1975 }
1976
1977 switch (result) {
1978 case L2CAP_CR_SUCCESS:
1979 sk->sk_state = BT_CONFIG;
1980 l2cap_pi(sk)->ident = 0;
1981 l2cap_pi(sk)->dcid = dcid;
1982 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1983
1984 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
1985
1986 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1987 l2cap_build_conf_req(sk, req), req);
1988 break;
1989
1990 case L2CAP_CR_PEND:
1991 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
1992 break;
1993
1994 default:
1995 l2cap_chan_del(sk, ECONNREFUSED);
1996 break;
1997 }
1998
1999 bh_unlock_sock(sk);
2000 return 0;
2001 }
2002
2003 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2004 {
2005 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2006 u16 dcid, flags;
2007 u8 rsp[64];
2008 struct sock *sk;
2009 int len;
2010
2011 dcid = __le16_to_cpu(req->dcid);
2012 flags = __le16_to_cpu(req->flags);
2013
2014 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2015
2016 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2017 return -ENOENT;
2018
2019 if (sk->sk_state == BT_DISCONN)
2020 goto unlock;
2021
2022 /* Reject if config buffer is too small. */
2023 len = cmd_len - sizeof(*req);
2024 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2025 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2026 l2cap_build_conf_rsp(sk, rsp,
2027 L2CAP_CONF_REJECT, flags), rsp);
2028 goto unlock;
2029 }
2030
2031 /* Store config. */
2032 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2033 l2cap_pi(sk)->conf_len += len;
2034
2035 if (flags & 0x0001) {
2036 /* Incomplete config. Send empty response. */
2037 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2038 l2cap_build_conf_rsp(sk, rsp,
2039 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2040 goto unlock;
2041 }
2042
2043 /* Complete config. */
2044 len = l2cap_parse_conf_req(sk, rsp);
2045 if (len < 0)
2046 goto unlock;
2047
2048 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2049
2050 /* Reset config buffer. */
2051 l2cap_pi(sk)->conf_len = 0;
2052
2053 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2054 goto unlock;
2055
2056 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2057 sk->sk_state = BT_CONNECTED;
2058 l2cap_chan_ready(sk);
2059 goto unlock;
2060 }
2061
2062 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2063 u8 buf[64];
2064 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2065 l2cap_build_conf_req(sk, buf), buf);
2066 }
2067
2068 unlock:
2069 bh_unlock_sock(sk);
2070 return 0;
2071 }
2072
2073 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2074 {
2075 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2076 u16 scid, flags, result;
2077 struct sock *sk;
2078
2079 scid = __le16_to_cpu(rsp->scid);
2080 flags = __le16_to_cpu(rsp->flags);
2081 result = __le16_to_cpu(rsp->result);
2082
2083 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2084
2085 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2086 return 0;
2087
2088 switch (result) {
2089 case L2CAP_CONF_SUCCESS:
2090 break;
2091
2092 case L2CAP_CONF_UNACCEPT:
2093 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2094 char req[128];
2095 /* It does not make sense to adjust L2CAP parameters
2096 * that are currently defined in the spec. We simply
2097 * resend config request that we sent earlier. It is
2098 * stupid, but it helps qualification testing which
2099 * expects at least some response from us. */
2100 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2101 l2cap_build_conf_req(sk, req), req);
2102 goto done;
2103 }
2104
2105 default:
2106 sk->sk_state = BT_DISCONN;
2107 sk->sk_err = ECONNRESET;
2108 l2cap_sock_set_timer(sk, HZ * 5);
2109 {
2110 struct l2cap_disconn_req req;
2111 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2112 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2113 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2114 L2CAP_DISCONN_REQ, sizeof(req), &req);
2115 }
2116 goto done;
2117 }
2118
2119 if (flags & 0x01)
2120 goto done;
2121
2122 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2123
2124 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2125 sk->sk_state = BT_CONNECTED;
2126 l2cap_chan_ready(sk);
2127 }
2128
2129 done:
2130 bh_unlock_sock(sk);
2131 return 0;
2132 }
2133
2134 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2135 {
2136 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2137 struct l2cap_disconn_rsp rsp;
2138 u16 dcid, scid;
2139 struct sock *sk;
2140
2141 scid = __le16_to_cpu(req->scid);
2142 dcid = __le16_to_cpu(req->dcid);
2143
2144 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2145
2146 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2147 return 0;
2148
2149 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2150 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2151 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2152
2153 sk->sk_shutdown = SHUTDOWN_MASK;
2154
2155 l2cap_chan_del(sk, ECONNRESET);
2156 bh_unlock_sock(sk);
2157
2158 l2cap_sock_kill(sk);
2159 return 0;
2160 }
2161
2162 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2163 {
2164 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2165 u16 dcid, scid;
2166 struct sock *sk;
2167
2168 scid = __le16_to_cpu(rsp->scid);
2169 dcid = __le16_to_cpu(rsp->dcid);
2170
2171 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2172
2173 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2174 return 0;
2175
2176 l2cap_chan_del(sk, 0);
2177 bh_unlock_sock(sk);
2178
2179 l2cap_sock_kill(sk);
2180 return 0;
2181 }
2182
2183 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2184 {
2185 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2186 u16 type;
2187
2188 type = __le16_to_cpu(req->type);
2189
2190 BT_DBG("type 0x%4.4x", type);
2191
2192 if (type == L2CAP_IT_FEAT_MASK) {
2193 u8 buf[8];
2194 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2195 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2196 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2197 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2198 l2cap_send_cmd(conn, cmd->ident,
2199 L2CAP_INFO_RSP, sizeof(buf), buf);
2200 } else if (type == L2CAP_IT_FIXED_CHAN) {
2201 u8 buf[12];
2202 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2203 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2204 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2205 memcpy(buf + 4, l2cap_fixed_chan, 8);
2206 l2cap_send_cmd(conn, cmd->ident,
2207 L2CAP_INFO_RSP, sizeof(buf), buf);
2208 } else {
2209 struct l2cap_info_rsp rsp;
2210 rsp.type = cpu_to_le16(type);
2211 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2212 l2cap_send_cmd(conn, cmd->ident,
2213 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2214 }
2215
2216 return 0;
2217 }
2218
2219 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2220 {
2221 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2222 u16 type, result;
2223
2224 type = __le16_to_cpu(rsp->type);
2225 result = __le16_to_cpu(rsp->result);
2226
2227 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2228
2229 del_timer(&conn->info_timer);
2230
2231 if (type == L2CAP_IT_FEAT_MASK) {
2232 conn->feat_mask = get_unaligned_le32(rsp->data);
2233
2234 if (conn->feat_mask & 0x0080) {	/* remote supports fixed channels */
2235 struct l2cap_info_req req;
2236 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2237
2238 conn->info_ident = l2cap_get_ident(conn);
2239
2240 l2cap_send_cmd(conn, conn->info_ident,
2241 L2CAP_INFO_REQ, sizeof(req), &req);
2242 } else {
2243 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2244 conn->info_ident = 0;
2245
2246 l2cap_conn_start(conn);
2247 }
2248 } else if (type == L2CAP_IT_FIXED_CHAN) {
2249 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2250 conn->info_ident = 0;
2251
2252 l2cap_conn_start(conn);
2253 }
2254
2255 return 0;
2256 }
2257
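/* Process the signalling channel: pass the frame to raw sockets, then
 * walk the concatenated commands and dispatch each one, sending a
 * Command Reject when a handler fails or the code is unknown. */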
2258 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2259 {
2260 u8 *data = skb->data;
2261 int len = skb->len;
2262 struct l2cap_cmd_hdr cmd;
2263 int err = 0;
2264
2265 l2cap_raw_recv(conn, skb);
2266
2267 while (len >= L2CAP_CMD_HDR_SIZE) {
2268 u16 cmd_len;
2269 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2270 data += L2CAP_CMD_HDR_SIZE;
2271 len -= L2CAP_CMD_HDR_SIZE;
2272
2273 cmd_len = le16_to_cpu(cmd.len);
2274
2275 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2276
2277 if (cmd_len > len || !cmd.ident) {
2278 BT_DBG("corrupted command");
2279 break;
2280 }
2281
2282 switch (cmd.code) {
2283 case L2CAP_COMMAND_REJ:
2284 l2cap_command_rej(conn, &cmd, data);
2285 break;
2286
2287 case L2CAP_CONN_REQ:
2288 err = l2cap_connect_req(conn, &cmd, data);
2289 break;
2290
2291 case L2CAP_CONN_RSP:
2292 err = l2cap_connect_rsp(conn, &cmd, data);
2293 break;
2294
2295 case L2CAP_CONF_REQ:
2296 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2297 break;
2298
2299 case L2CAP_CONF_RSP:
2300 err = l2cap_config_rsp(conn, &cmd, data);
2301 break;
2302
2303 case L2CAP_DISCONN_REQ:
2304 err = l2cap_disconnect_req(conn, &cmd, data);
2305 break;
2306
2307 case L2CAP_DISCONN_RSP:
2308 err = l2cap_disconnect_rsp(conn, &cmd, data);
2309 break;
2310
2311 case L2CAP_ECHO_REQ:
2312 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2313 break;
2314
2315 case L2CAP_ECHO_RSP:
2316 break;
2317
2318 case L2CAP_INFO_REQ:
2319 err = l2cap_information_req(conn, &cmd, data);
2320 break;
2321
2322 case L2CAP_INFO_RSP:
2323 err = l2cap_information_rsp(conn, &cmd, data);
2324 break;
2325
2326 default:
2327 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2328 err = -EINVAL;
2329 break;
2330 }
2331
2332 if (err) {
2333 struct l2cap_cmd_rej rej;
2334 BT_DBG("error %d", err);
2335
2336 /* FIXME: Map err to a valid reason */
2337 rej.reason = cpu_to_le16(0);
2338 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2339 }
2340
2341 data += cmd_len;
2342 len -= cmd_len;
2343 }
2344
2345 kfree_skb(skb);
2346 }
2347
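/* Deliver a connection-oriented frame to the socket owning the CID;
 * drop it if the channel is unknown, not connected or the frame is
 * larger than the incoming MTU. */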
2348 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2349 {
2350 struct sock *sk;
2351
2352 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2353 if (!sk) {
2354 BT_DBG("unknown cid 0x%4.4x", cid);
2355 goto drop;
2356 }
2357
2358 BT_DBG("sk %p, len %d", sk, skb->len);
2359
2360 if (sk->sk_state != BT_CONNECTED)
2361 goto drop;
2362
2363 if (l2cap_pi(sk)->imtu < skb->len)
2364 goto drop;
2365
2366 /* If the socket receive buffer overflows we drop data here,
2367 * which is *bad* because L2CAP has to be reliable.
2368 * But we don't have any other choice: L2CAP doesn't
2369 * provide a flow control mechanism. */
2370
2371 if (!sock_queue_rcv_skb(sk, skb))
2372 goto done;
2373
2374 drop:
2375 kfree_skb(skb);
2376
2377 done:
2378 if (sk)
2379 bh_unlock_sock(sk);
2380
2381 return 0;
2382 }
2383
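/* Deliver a connectionless frame to the socket bound to the given PSM,
 * applying the same state and MTU checks. */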
2384 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2385 {
2386 struct sock *sk;
2387
2388 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2389 if (!sk)
2390 goto drop;
2391
2392 BT_DBG("sk %p, len %d", sk, skb->len);
2393
2394 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2395 goto drop;
2396
2397 if (l2cap_pi(sk)->imtu < skb->len)
2398 goto drop;
2399
2400 if (!sock_queue_rcv_skb(sk, skb))
2401 goto done;
2402
2403 drop:
2404 kfree_skb(skb);
2405
2406 done:
2407 if (sk)
	bh_unlock_sock(sk);
2408 return 0;
2409 }
2410
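/* Demultiplex a complete L2CAP frame by CID: 0x0001 is the signalling
 * channel, 0x0002 carries connectionless data, everything else is a
 * connection-oriented data channel. */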
2411 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2412 {
2413 struct l2cap_hdr *lh = (void *) skb->data;
2414 u16 cid, len;
2415 __le16 psm;
2416
2417 skb_pull(skb, L2CAP_HDR_SIZE);
2418 cid = __le16_to_cpu(lh->cid);
2419 len = __le16_to_cpu(lh->len);
2420
2421 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2422
2423 switch (cid) {
2424 case 0x0001:
2425 l2cap_sig_channel(conn, skb);
2426 break;
2427
2428 case 0x0002:
2429 psm = get_unaligned((__le16 *) skb->data);
2430 skb_pull(skb, 2);
2431 l2cap_conless_channel(conn, psm, skb);
2432 break;
2433
2434 default:
2435 l2cap_data_channel(conn, cid, skb);
2436 break;
2437 }
2438 }
2439
2440 /* ---- L2CAP interface with lower layer (HCI) ---- */
2441
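/* Incoming ACL connection: check the listening sockets and report
 * whether to accept the link and whether to request the master role. */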
2442 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2443 {
2444 int exact = 0, lm1 = 0, lm2 = 0;
2445 register struct sock *sk;
2446 struct hlist_node *node;
2447
2448 if (type != ACL_LINK)
2449 return 0;
2450
2451 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2452
2453 /* Find listening sockets and check their link_mode */
2454 read_lock(&l2cap_sk_list.lock);
2455 sk_for_each(sk, node, &l2cap_sk_list.head) {
2456 if (sk->sk_state != BT_LISTEN)
2457 continue;
2458
2459 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2460 lm1 |= HCI_LM_ACCEPT;
2461 if (l2cap_pi(sk)->role_switch)
2462 lm1 |= HCI_LM_MASTER;
2463 exact++;
2464 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2465 lm2 |= HCI_LM_ACCEPT;
2466 if (l2cap_pi(sk)->role_switch)
2467 lm2 |= HCI_LM_MASTER;
2468 }
2469 }
2470 read_unlock(&l2cap_sk_list.lock);
2471
2472 return exact ? lm1 : lm2;
2473 }
2474
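/* ACL link setup completed: create the L2CAP connection on success,
 * otherwise tear down anything pending with the mapped error. */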
2475 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2476 {
2477 struct l2cap_conn *conn;
2478
2479 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2480
2481 if (hcon->type != ACL_LINK)
2482 return 0;
2483
2484 if (!status) {
2485 conn = l2cap_conn_add(hcon, status);
2486 if (conn)
2487 l2cap_conn_ready(conn);
2488 } else
2489 l2cap_conn_del(hcon, bt_err(status));
2490
2491 return 0;
2492 }
2493
2494 static int l2cap_disconn_ind(struct hci_conn *hcon)
2495 {
2496 struct l2cap_conn *conn = hcon->l2cap_data;
2497
2498 BT_DBG("hcon %p", hcon);
2499
2500 if (hcon->type != ACL_LINK || !conn)
2501 return 0x13;	/* remote user terminated connection */
2502
2503 return conn->disc_reason;
2504 }
2505
2506 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
2507 {
2508 BT_DBG("hcon %p reason %d", hcon, reason);
2509
2510 if (hcon->type != ACL_LINK)
2511 return 0;
2512
2513 l2cap_conn_del(hcon, bt_err(reason));
2514
2515 return 0;
2516 }
2517
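/* Encryption state changed on a connected channel: give medium
 * security a short grace timer when encryption is lost and close high
 * security channels immediately. */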
2518 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2519 {
2520 if (sk->sk_type != SOCK_SEQPACKET)
2521 return;
2522
2523 if (encrypt == 0x00) {
2524 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2525 l2cap_sock_clear_timer(sk);
2526 l2cap_sock_set_timer(sk, HZ * 5);
2527 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2528 __l2cap_sock_close(sk, ECONNREFUSED);
2529 } else {
2530 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2531 l2cap_sock_clear_timer(sk);
2532 }
2533 }
2534
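/* Security procedure (authentication/encryption) completed: resume
 * pending outgoing connection requests and answer deferred incoming
 * ones according to the result. */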
2535 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2536 {
2537 struct l2cap_chan_list *l;
2538 struct l2cap_conn *conn = hcon->l2cap_data;
2539 struct sock *sk;
2540
2541 if (!conn)
2542 return 0;
2543
2544 l = &conn->chan_list;
2545
2546 BT_DBG("conn %p", conn);
2547
2548 read_lock(&l->lock);
2549
2550 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2551 bh_lock_sock(sk);
2552
2553 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2554 bh_unlock_sock(sk);
2555 continue;
2556 }
2557
2558 if (!status && (sk->sk_state == BT_CONNECTED ||
2559 sk->sk_state == BT_CONFIG)) {
2560 l2cap_check_encryption(sk, encrypt);
2561 bh_unlock_sock(sk);
2562 continue;
2563 }
2564
2565 if (sk->sk_state == BT_CONNECT) {
2566 if (!status) {
2567 struct l2cap_conn_req req;
2568 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2569 req.psm = l2cap_pi(sk)->psm;
2570
2571 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2572
2573 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2574 L2CAP_CONN_REQ, sizeof(req), &req);
2575 } else {
2576 l2cap_sock_clear_timer(sk);
2577 l2cap_sock_set_timer(sk, HZ / 10);
2578 }
2579 } else if (sk->sk_state == BT_CONNECT2) {
2580 struct l2cap_conn_rsp rsp;
2581 __u16 result;
2582
2583 if (!status) {
2584 sk->sk_state = BT_CONFIG;
2585 result = L2CAP_CR_SUCCESS;
2586 } else {
2587 sk->sk_state = BT_DISCONN;
2588 l2cap_sock_set_timer(sk, HZ / 10);
2589 result = L2CAP_CR_SEC_BLOCK;
2590 }
2591
2592 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2593 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2594 rsp.result = cpu_to_le16(result);
2595 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2596 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2597 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2598 }
2599
2600 bh_unlock_sock(sk);
2601 }
2602
2603 read_unlock(&l->lock);
2604
2605 return 0;
2606 }
2607
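/* Reassemble L2CAP frames from ACL fragments: a start fragment either
 * carries a complete frame or opens a reassembly buffer; continuation
 * fragments are appended until the announced length is reached. */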
2608 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2609 {
2610 struct l2cap_conn *conn = hcon->l2cap_data;
2611
2612 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2613 goto drop;
2614
2615 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2616
2617 if (flags & ACL_START) {
2618 struct l2cap_hdr *hdr;
2619 int len;
2620
2621 if (conn->rx_len) {
2622 BT_ERR("Unexpected start frame (len %d)", skb->len);
2623 kfree_skb(conn->rx_skb);
2624 conn->rx_skb = NULL;
2625 conn->rx_len = 0;
2626 l2cap_conn_unreliable(conn, ECOMM);
2627 }
2628
2629 if (skb->len < 2) {
2630 BT_ERR("Frame is too short (len %d)", skb->len);
2631 l2cap_conn_unreliable(conn, ECOMM);
2632 goto drop;
2633 }
2634
2635 hdr = (struct l2cap_hdr *) skb->data;
2636 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2637
2638 if (len == skb->len) {
2639 /* Complete frame received */
2640 l2cap_recv_frame(conn, skb);
2641 return 0;
2642 }
2643
2644 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2645
2646 if (skb->len > len) {
2647 BT_ERR("Frame is too long (len %d, expected len %d)",
2648 skb->len, len);
2649 l2cap_conn_unreliable(conn, ECOMM);
2650 goto drop;
2651 }
2652
2653 /* Allocate skb for the complete frame (with header) */
2654 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2655 goto drop;
2656
2657 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2658 skb->len);
2659 conn->rx_len = len - skb->len;
2660 } else {
2661 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2662
2663 if (!conn->rx_len) {
2664 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2665 l2cap_conn_unreliable(conn, ECOMM);
2666 goto drop;
2667 }
2668
2669 if (skb->len > conn->rx_len) {
2670 BT_ERR("Fragment is too long (len %d, expected %d)",
2671 skb->len, conn->rx_len);
2672 kfree_skb(conn->rx_skb);
2673 conn->rx_skb = NULL;
2674 conn->rx_len = 0;
2675 l2cap_conn_unreliable(conn, ECOMM);
2676 goto drop;
2677 }
2678
2679 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2680 skb->len);
2681 conn->rx_len -= skb->len;
2682
2683 if (!conn->rx_len) {
2684 /* Complete frame received */
2685 l2cap_recv_frame(conn, conn->rx_skb);
2686 conn->rx_skb = NULL;
2687 }
2688 }
2689
2690 drop:
2691 kfree_skb(skb);
2692 return 0;
2693 }
2694
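/* Dump one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) for the "l2cap" class attribute. */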
2695 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2696 {
2697 struct sock *sk;
2698 struct hlist_node *node;
2699 char *str = buf;
2700
2701 read_lock_bh(&l2cap_sk_list.lock);
2702
2703 sk_for_each(sk, node, &l2cap_sk_list.head) {
2704 struct l2cap_pinfo *pi = l2cap_pi(sk);
2705
2706 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2707 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2708 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2709 pi->imtu, pi->omtu, pi->sec_level);
2710 }
2711
2712 read_unlock_bh(&l2cap_sk_list.lock);
2713
2714 return (str - buf);
2715 }
2716
2717 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2718
2719 static const struct proto_ops l2cap_sock_ops = {
2720 .family = PF_BLUETOOTH,
2721 .owner = THIS_MODULE,
2722 .release = l2cap_sock_release,
2723 .bind = l2cap_sock_bind,
2724 .connect = l2cap_sock_connect,
2725 .listen = l2cap_sock_listen,
2726 .accept = l2cap_sock_accept,
2727 .getname = l2cap_sock_getname,
2728 .sendmsg = l2cap_sock_sendmsg,
2729 .recvmsg = l2cap_sock_recvmsg,
2730 .poll = bt_sock_poll,
2731 .ioctl = bt_sock_ioctl,
2732 .mmap = sock_no_mmap,
2733 .socketpair = sock_no_socketpair,
2734 .shutdown = l2cap_sock_shutdown,
2735 .setsockopt = l2cap_sock_setsockopt,
2736 .getsockopt = l2cap_sock_getsockopt
2737 };
2738
2739 static struct net_proto_family l2cap_sock_family_ops = {
2740 .family = PF_BLUETOOTH,
2741 .owner = THIS_MODULE,
2742 .create = l2cap_sock_create,
2743 };
2744
2745 static struct hci_proto l2cap_hci_proto = {
2746 .name = "L2CAP",
2747 .id = HCI_PROTO_L2CAP,
2748 .connect_ind = l2cap_connect_ind,
2749 .connect_cfm = l2cap_connect_cfm,
2750 .disconn_ind = l2cap_disconn_ind,
2751 .disconn_cfm = l2cap_disconn_cfm,
2752 .security_cfm = l2cap_security_cfm,
2753 .recv_acldata = l2cap_recv_acldata
2754 };
2755
2756 static int __init l2cap_init(void)
2757 {
2758 int err;
2759
2760 err = proto_register(&l2cap_proto, 0);
2761 if (err < 0)
2762 return err;
2763
2764 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2765 if (err < 0) {
2766 BT_ERR("L2CAP socket registration failed");
2767 goto error;
2768 }
2769
2770 err = hci_register_proto(&l2cap_hci_proto);
2771 if (err < 0) {
2772 BT_ERR("L2CAP protocol registration failed");
2773 bt_sock_unregister(BTPROTO_L2CAP);
2774 goto error;
2775 }
2776
2777 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2778 BT_ERR("Failed to create L2CAP info file");
2779
2780 BT_INFO("L2CAP ver %s", VERSION);
2781 BT_INFO("L2CAP socket layer initialized");
2782
2783 return 0;
2784
2785 error:
2786 proto_unregister(&l2cap_proto);
2787 return err;
2788 }
2789
2790 static void __exit l2cap_exit(void)
2791 {
2792 class_remove_file(bt_class, &class_attr_l2cap);
2793
2794 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2795 BT_ERR("L2CAP socket unregistration failed");
2796
2797 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2798 BT_ERR("L2CAP protocol unregistration failed");
2799
2800 proto_unregister(&l2cap_proto);
2801 }
2802
2803 void l2cap_load(void)
2804 {
2805 /* Dummy function to trigger automatic L2CAP module loading by
2806 * other modules that use L2CAP sockets but don't use any other
2807 * symbols from it. */
2808 return;
2809 }
2810 EXPORT_SYMBOL(l2cap_load);
2811
2812 module_init(l2cap_init);
2813 module_exit(l2cap_exit);
2814
2815 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2816 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2817 MODULE_VERSION(VERSION);
2818 MODULE_LICENSE("GPL");
2819 MODULE_ALIAS("bt-proto-0");