Bluetooth: Fix PTR_ERR return of wrong pointer in hidp_setup_hid()
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
46
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
53
#define VERSION "2.14"

/* Module parameters: ERTM (enhanced retransmission) support is compiled in
 * but disabled by default in this version. */
static int enable_ertm = 0;
static int max_transmit = L2CAP_DEFAULT_MAX_TX;

/* Feature mask advertised in Information Responses; the fixed-channel map
 * has only bit 1 set (0x02) — presumably the signalling channel, TODO
 * confirm against the L2CAP spec's fixed-channel bit assignments. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Global list of every L2CAP socket, guarded by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations for routines used before their definitions. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
74
75 /* ---- L2CAP timers ---- */
/* Socket timer expiry handler (timer/BH context).
 *
 * Chooses the error reported to the user from how far the channel got:
 * ECONNREFUSED when the link was connected/configuring, or still in the
 * connect phase above SDP security level; ETIMEDOUT otherwise.  Closes
 * the channel, then reaps the (now possibly zapped) orphan and drops the
 * reference the timer held on the socket.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
100
/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
106
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
112
113 /* ---- L2CAP channels ---- */
114 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 {
116 struct sock *s;
117 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
118 if (l2cap_pi(s)->dcid == cid)
119 break;
120 }
121 return s;
122 }
123
124 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 {
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->scid == cid)
129 break;
130 }
131 return s;
132 }
133
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		/* Lock the socket before dropping the list lock so the
		 * channel cannot be torn down under the caller. */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
146
147 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 {
149 struct sock *s;
150 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
151 if (l2cap_pi(s)->ident == ident)
152 break;
153 }
154 return s;
155 }
156
/* Find channel with given command ident.
 * Returns the socket locked (bh_lock_sock), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		/* Lock before releasing the list lock — see
		 * l2cap_get_chan_by_scid(). */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
167
168 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 {
170 u16 cid = L2CAP_CID_DYN_START;
171
172 for (; cid < L2CAP_CID_DYN_END; cid++) {
173 if (!__l2cap_get_chan_by_scid(l, cid))
174 return cid;
175 }
176
177 return 0;
178 }
179
/* Push the socket onto the front of the connection's channel list and take
 * a reference for the list's pointer.  Caller must hold the list lock for
 * writing. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
191
/* Remove the socket from the connection's channel list (taking the list
 * lock itself) and drop the reference the list held.  __sock_put is used
 * because the caller still owns its own reference. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
208
/* Attach a socket to a connection: assign CIDs appropriate for the socket
 * type, link it into the channel list, and (for incoming channels) queue
 * it on the listening parent's accept queue.  Caller must hold the
 * channel-list lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 — default disconnect reason; same value set in
	 * l2cap_conn_add().  TODO confirm meaning against the HCI error
	 * code table. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
240
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the socket from its connection (dropping the hci_conn ref),
 * marks it closed/zapped, records err in sk_err, and wakes either the
 * listening parent (for not-yet-accepted children) or the socket itself. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept()ed child: pull it off the accept queue
		 * and let the parent's readers notice. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
271
/* Service level security
 *
 * Maps the socket's security level to an HCI authentication requirement
 * (PSM 0x0001 — SDP — never bonds) and asks the HCI layer to enforce it.
 * Returns the hci_conn_security() result: non-zero when the link already
 * satisfies the requirement. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		/* SDP traffic is exempt from the low-security default. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
303
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
325
326 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 {
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329
330 BT_DBG("code 0x%2.2x", code);
331
332 if (!skb)
333 return -ENOMEM;
334
335 return hci_send_acl(conn->hcon, skb, 0);
336 }
337
/* Build and transmit an ERTM supervisory (S-) frame carrying `control`.
 * The payload is just the 16-bit control field, plus a trailing CRC16 FCS
 * when the channel negotiated one.  Returns -ENOMEM on allocation failure
 * or the hci_send_acl() result. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	/* Basic header + 2-byte control field ... */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	/* ... + 2-byte FCS when enabled. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before itself (count - 2 bytes). */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}
369
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371 {
372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
373 control |= L2CAP_SUPER_RCV_NOT_READY;
374 else
375 control |= L2CAP_SUPER_RCV_READY;
376
377 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
378
379 return l2cap_send_sframe(pi, control);
380 }
381
/* Kick off channel establishment on an existing link.
 *
 * If the remote's feature mask has already been requested, send a
 * Connection Request once the features are known and security allows;
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight — the connect request
		 * will be sent from l2cap_conn_start() when it finishes. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
414
/* Send an L2CAP Disconnection Request for this channel's CID pair. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_disconn_req req;

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
}
424
425 /* ---- L2CAP connections ---- */
/* Walk every SEQPACKET channel on the connection after the feature
 * exchange completes: send Connection Requests for channels still in
 * BT_CONNECT, and answer pending incoming requests (BT_CONNECT2) with a
 * Connection Response whose result depends on security and defer_setup. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented channels take part. */
		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace decides: report pending
					 * authorization and wake parent. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedure still running. */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
485
/* ACL link came up: mark raw/dgram channels connected immediately and
 * begin channel establishment for SEQPACKET channels waiting in
 * BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			/* No L2CAP-level handshake needed for raw/dgram. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
510
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
513 {
514 struct l2cap_chan_list *l = &conn->chan_list;
515 struct sock *sk;
516
517 BT_DBG("conn %p", conn);
518
519 read_lock(&l->lock);
520
521 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
522 if (l2cap_pi(sk)->force_reliable)
523 sk->sk_err = err;
524 }
525
526 read_unlock(&l->lock);
527 }
528
/* Information-request timer expired: give up waiting for the remote's
 * feature mask, mark the exchange done, and proceed with any channels
 * that were blocked on it. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
538
/* Allocate and initialise the per-ACL-link L2CAP state, attaching it to
 * the hci_conn.  Returns the existing conn when one is already present,
 * it when status is non-zero (failed link), or NULL on allocation
 * failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason — same constant as __l2cap_chan_add(). */
	conn->disc_reason = 0x13;

	return conn;
}
571
/* Tear down the L2CAP state of an ACL link: free any partial reassembly
 * skb, delete every channel (reporting err to its owner), stop the info
 * timer if it was armed, and free the conn itself. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb handles a NULL rx_skb. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
598
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
606
607 /* ---- Socket interface ---- */
608 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
609 {
610 struct sock *sk;
611 struct hlist_node *node;
612 sk_for_each(sk, node, &l2cap_sk_list.head)
613 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
614 goto found;
615 sk = NULL;
616 found:
617 return sk;
618 }
619
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 matches any socket state. */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* `node` is non-NULL only when the loop broke early on an exact
	 * match; otherwise fall back to the BDADDR_ANY candidate. */
	return node ? sk : sk1;
}
644
/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		/* Lock before dropping the list lock so the socket cannot
		 * go away under the caller. */
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
657
/* sk_destruct callback: drop any skbs still queued when the last
 * reference to the socket is released. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
665
/* Shut down a listening socket: close every connection still sitting on
 * the accept queue, then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
679
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 *
 * Unlinks the socket from the global list and drops the list's
 * reference; a socket still attached to a struct socket, or not yet
 * zapped, is left alone. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
695
/* Close state machine for a locked socket.
 *
 * Connected/configuring SEQPACKET channels start an orderly disconnect
 * (Disconnection Request plus timer); a half-open incoming channel
 * (BT_CONNECT2) is refused with a Connection Response; everything else
 * is deleted or simply zapped. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: wait (bounded by sndtimeo) for
			 * the remote's Disconnection Response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
748
/* Must be called on unlocked socket.
 *
 * Cancels the socket timer, runs the close state machine under the
 * socket lock, then reaps the socket if it became a zapped orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
758
/* Initialise per-socket L2CAP options: inherit them from the listening
 * parent for accepted children, or apply defaults for fresh sockets;
 * then set up the ERTM transmit/SREJ queues. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
793
/* Protocol descriptor; obj_size makes sk_alloc() allocate room for the
 * l2cap_pinfo that wraps struct sock. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
799
/* Allocate and minimally initialise an L2CAP socket: destructor, send
 * timeout, state, timer, and linkage into the global socket list.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
824
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP.
 *
 * Accepts SEQPACKET, DGRAM and RAW types only; RAW additionally requires
 * CAP_NET_RAW for non-kernel callers.  Allocates and initialises the
 * socket with default options. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
850
/* bind(2) backend: validate the sockaddr, require CAP_NET_BIND_SERVICE
 * for privileged PSMs (< 0x1001), reject an already-bound PSM+address
 * pair, and record the source address.  Binding to SDP/RFCOMM PSMs
 * (0x0001/0x0003) relaxes the security level. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter callers are zero-padded. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to an explicit CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
904
/* Establish (or reuse) the ACL link to the destination and attach this
 * socket to it as a channel.
 *
 * Maps the socket's security level to an HCI authentication type (raw
 * sockets use dedicated bonding, SDP never bonds, everything else uses
 * general bonding), creates the hci_conn and l2cap_conn, and either
 * finishes immediately (link already up) or leaves the socket in
 * BT_CONNECT with its timer armed.
 *
 * Returns 0 on success, -EHOSTUNREACH when no route to the destination
 * exists, or -ENOMEM when connection setup fails. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: never bond; low security becomes the SDP level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			/* Raw/dgram channels need no L2CAP handshake. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
994
/* connect(2) backend: validate the address and mode (ERTM/streaming only
 * when enable_ertm is set), start the connection from BT_OPEN/BT_BOUND,
 * then block until BT_CONNECTED or the sndtimeo expires. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to an explicit CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1069
/* listen(2) backend: only bound SEQPACKET sockets in a supported mode may
 * listen.  A socket bound without a PSM gets an automatic one from the
 * odd-valued dynamic range 0x1001-0x10ff. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of the low octet set). */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1127
/* accept(2) backend: sleep (interruptibly, bounded by rcvtimeo) until a
 * fully established child appears on the accept queue, releasing the
 * parent's lock while waiting.  Uses an exclusive waitqueue entry so
 * each connection wakes a single acceptor (wake-one). */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while asleep so the BH side can enqueue. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Socket may have stopped listening while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1183
/* getsockname/getpeername backend: fill in PSM, bdaddr and CID for the
 * peer (peer != 0) or the local end. */
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer) {
		la->l2_psm = l2cap_pi(sk)->psm;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	} else {
		/* Local side reports the bound sport, not psm. */
		la->l2_psm = l2cap_pi(sk)->sport;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
	}

	return 0;
}
1206
1207 static void l2cap_monitor_timeout(unsigned long arg)
1208 {
1209 struct sock *sk = (void *) arg;
1210 u16 control;
1211
1212 bh_lock_sock(sk);
1213 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1214 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1215 return;
1216 }
1217
1218 l2cap_pi(sk)->retry_count++;
1219 __mod_monitor_timer();
1220
1221 control = L2CAP_CTRL_POLL;
1222 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1223 bh_unlock_sock(sk);
1224 }
1225
/* ERTM retransmission timer expiry (timer/BH context): enter the WAIT_F
 * state, arm the monitor timer with retry_count reset to 1, and poll the
 * peer with an RR/RNR frame (P bit set). */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}
1241
1242 static void l2cap_drop_acked_frames(struct sock *sk)
1243 {
1244 struct sk_buff *skb;
1245
1246 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1247 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1248 break;
1249
1250 skb = skb_dequeue(TX_QUEUE(sk));
1251 kfree_skb(skb);
1252
1253 l2cap_pi(sk)->unacked_frames--;
1254 }
1255
1256 if (!l2cap_pi(sk)->unacked_frames)
1257 del_timer(&l2cap_pi(sk)->retrans_timer);
1258
1259 return;
1260 }
1261
/* Hand one skb to the HCI layer; on error the skb is freed here (the
 * caller must not touch it afterwards).  Returns the hci_send_acl()
 * result. */
static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int err;

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	err = hci_send_acl(pi->conn->hcon, skb, 0);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
1275
1276 static int l2cap_streaming_send(struct sock *sk)
1277 {
1278 struct sk_buff *skb, *tx_skb;
1279 struct l2cap_pinfo *pi = l2cap_pi(sk);
1280 u16 control, fcs;
1281 int err;
1282
1283 while ((skb = sk->sk_send_head)) {
1284 tx_skb = skb_clone(skb, GFP_ATOMIC);
1285
1286 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1287 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1288 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1289
1290 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1291 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1292 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1293 }
1294
1295 err = l2cap_do_send(sk, tx_skb);
1296 if (err < 0) {
1297 l2cap_send_disconn_req(pi->conn, sk);
1298 return err;
1299 }
1300
1301 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1302
1303 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1304 sk->sk_send_head = NULL;
1305 else
1306 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1307
1308 skb = skb_dequeue(TX_QUEUE(sk));
1309 kfree_skb(skb);
1310 }
1311 return 0;
1312 }
1313
/* Retransmit the single I-frame carrying tx_seq (SREJ-style recovery).
 *
 * Scans the transmit queue for the frame; silently does nothing when
 * tx_seq is not queued.  Disconnects when the frame has already been
 * sent remote_max_tx times.  Returns 0, or a negative error if the
 * HCI send failed.
 */
static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;
	int err;

	/* NOTE(review): assumes the tx queue is non-empty here;
	 * skb_peek() would return NULL otherwise — confirm callers. */
	skb = skb_peek(TX_QUEUE(sk));
	do {
		/* Walk forward until the requested sequence number */
		if (bt_cb(skb)->tx_seq != tx_seq) {
			if (skb_queue_is_last(TX_QUEUE(sk), skb))
				break;
			skb = skb_queue_next(TX_QUEUE(sk), skb);
			continue;
		}

		/* Retry budget for this frame exhausted: give up */
		if (pi->remote_max_tx &&
				bt_cb(skb)->retries == pi->remote_max_tx) {
			l2cap_send_disconn_req(pi->conn, sk);
			break;
		}

		/* Clone the queued frame and refresh ReqSeq/TxSeq in
		 * the control field of the copy we transmit */
		tx_skb = skb_clone(skb, GFP_ATOMIC);
		bt_cb(skb)->retries++;
		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		/* FCS covers everything but its own trailing 2 bytes */
		if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		err = l2cap_do_send(sk, tx_skb);
		if (err < 0) {
			l2cap_send_disconn_req(pi->conn, sk);
			return err;
		}
		break;
	} while(1);
	return 0;
}
1357
1358 static int l2cap_ertm_send(struct sock *sk)
1359 {
1360 struct sk_buff *skb, *tx_skb;
1361 struct l2cap_pinfo *pi = l2cap_pi(sk);
1362 u16 control, fcs;
1363 int err;
1364
1365 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1366 return 0;
1367
1368 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1369 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1370 tx_skb = skb_clone(skb, GFP_ATOMIC);
1371
1372 if (pi->remote_max_tx &&
1373 bt_cb(skb)->retries == pi->remote_max_tx) {
1374 l2cap_send_disconn_req(pi->conn, sk);
1375 break;
1376 }
1377
1378 bt_cb(skb)->retries++;
1379
1380 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1381 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1382 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1383 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1384
1385
1386 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1388 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1389 }
1390
1391 err = l2cap_do_send(sk, tx_skb);
1392 if (err < 0) {
1393 l2cap_send_disconn_req(pi->conn, sk);
1394 return err;
1395 }
1396 __mod_retrans_timer();
1397
1398 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1399 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1400
1401 pi->unacked_frames++;
1402
1403 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1404 sk->sk_send_head = NULL;
1405 else
1406 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1407 }
1408
1409 return 0;
1410 }
1411
1412 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1413 {
1414 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1415 struct sk_buff **frag;
1416 int err, sent = 0;
1417
1418 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1419 return -EFAULT;
1420 }
1421
1422 sent += count;
1423 len -= count;
1424
1425 /* Continuation fragments (no L2CAP header) */
1426 frag = &skb_shinfo(skb)->frag_list;
1427 while (len) {
1428 count = min_t(unsigned int, conn->mtu, len);
1429
1430 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1431 if (!*frag)
1432 return -EFAULT;
1433 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1434 return -EFAULT;
1435
1436 sent += count;
1437 len -= count;
1438
1439 frag = &(*frag)->next;
1440 }
1441
1442 return sent;
1443 }
1444
1445 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1446 {
1447 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1448 struct sk_buff *skb;
1449 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1450 struct l2cap_hdr *lh;
1451
1452 BT_DBG("sk %p len %d", sk, (int)len);
1453
1454 count = min_t(unsigned int, (conn->mtu - hlen), len);
1455 skb = bt_skb_send_alloc(sk, count + hlen,
1456 msg->msg_flags & MSG_DONTWAIT, &err);
1457 if (!skb)
1458 return ERR_PTR(-ENOMEM);
1459
1460 /* Create L2CAP header */
1461 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1462 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1463 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1464 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1465
1466 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1467 if (unlikely(err < 0)) {
1468 kfree_skb(skb);
1469 return ERR_PTR(err);
1470 }
1471 return skb;
1472 }
1473
1474 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1475 {
1476 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1477 struct sk_buff *skb;
1478 int err, count, hlen = L2CAP_HDR_SIZE;
1479 struct l2cap_hdr *lh;
1480
1481 BT_DBG("sk %p len %d", sk, (int)len);
1482
1483 count = min_t(unsigned int, (conn->mtu - hlen), len);
1484 skb = bt_skb_send_alloc(sk, count + hlen,
1485 msg->msg_flags & MSG_DONTWAIT, &err);
1486 if (!skb)
1487 return ERR_PTR(-ENOMEM);
1488
1489 /* Create L2CAP header */
1490 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1491 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1492 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1493
1494 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1495 if (unlikely(err < 0)) {
1496 kfree_skb(skb);
1497 return ERR_PTR(err);
1498 }
1499 return skb;
1500 }
1501
1502 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1503 {
1504 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1505 struct sk_buff *skb;
1506 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1507 struct l2cap_hdr *lh;
1508
1509 BT_DBG("sk %p len %d", sk, (int)len);
1510
1511 if (sdulen)
1512 hlen += 2;
1513
1514 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1515 hlen += 2;
1516
1517 count = min_t(unsigned int, (conn->mtu - hlen), len);
1518 skb = bt_skb_send_alloc(sk, count + hlen,
1519 msg->msg_flags & MSG_DONTWAIT, &err);
1520 if (!skb)
1521 return ERR_PTR(-ENOMEM);
1522
1523 /* Create L2CAP header */
1524 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1525 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1526 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1527 put_unaligned_le16(control, skb_put(skb, 2));
1528 if (sdulen)
1529 put_unaligned_le16(sdulen, skb_put(skb, 2));
1530
1531 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1532 if (unlikely(err < 0)) {
1533 kfree_skb(skb);
1534 return ERR_PTR(err);
1535 }
1536
1537 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1538 put_unaligned_le16(0, skb_put(skb, 2));
1539
1540 bt_cb(skb)->retries = 0;
1541 return skb;
1542 }
1543
1544 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1545 {
1546 struct l2cap_pinfo *pi = l2cap_pi(sk);
1547 struct sk_buff *skb;
1548 struct sk_buff_head sar_queue;
1549 u16 control;
1550 size_t size = 0;
1551
1552 __skb_queue_head_init(&sar_queue);
1553 control = L2CAP_SDU_START;
1554 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1555 if (IS_ERR(skb))
1556 return PTR_ERR(skb);
1557
1558 __skb_queue_tail(&sar_queue, skb);
1559 len -= pi->max_pdu_size;
1560 size +=pi->max_pdu_size;
1561 control = 0;
1562
1563 while (len > 0) {
1564 size_t buflen;
1565
1566 if (len > pi->max_pdu_size) {
1567 control |= L2CAP_SDU_CONTINUE;
1568 buflen = pi->max_pdu_size;
1569 } else {
1570 control |= L2CAP_SDU_END;
1571 buflen = len;
1572 }
1573
1574 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1575 if (IS_ERR(skb)) {
1576 skb_queue_purge(&sar_queue);
1577 return PTR_ERR(skb);
1578 }
1579
1580 __skb_queue_tail(&sar_queue, skb);
1581 len -= buflen;
1582 size += buflen;
1583 control = 0;
1584 }
1585 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1586 if (sk->sk_send_head == NULL)
1587 sk->sk_send_head = sar_queue.next;
1588
1589 return size;
1590 }
1591
1592 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1593 {
1594 struct sock *sk = sock->sk;
1595 struct l2cap_pinfo *pi = l2cap_pi(sk);
1596 struct sk_buff *skb;
1597 u16 control;
1598 int err;
1599
1600 BT_DBG("sock %p, sk %p", sock, sk);
1601
1602 err = sock_error(sk);
1603 if (err)
1604 return err;
1605
1606 if (msg->msg_flags & MSG_OOB)
1607 return -EOPNOTSUPP;
1608
1609 /* Check outgoing MTU */
1610 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1611 len > pi->omtu)
1612 return -EINVAL;
1613
1614 lock_sock(sk);
1615
1616 if (sk->sk_state != BT_CONNECTED) {
1617 err = -ENOTCONN;
1618 goto done;
1619 }
1620
1621 /* Connectionless channel */
1622 if (sk->sk_type == SOCK_DGRAM) {
1623 skb = l2cap_create_connless_pdu(sk, msg, len);
1624 err = l2cap_do_send(sk, skb);
1625 goto done;
1626 }
1627
1628 switch (pi->mode) {
1629 case L2CAP_MODE_BASIC:
1630 /* Create a basic PDU */
1631 skb = l2cap_create_basic_pdu(sk, msg, len);
1632 if (IS_ERR(skb)) {
1633 err = PTR_ERR(skb);
1634 goto done;
1635 }
1636
1637 err = l2cap_do_send(sk, skb);
1638 if (!err)
1639 err = len;
1640 break;
1641
1642 case L2CAP_MODE_ERTM:
1643 case L2CAP_MODE_STREAMING:
1644 /* Entire SDU fits into one PDU */
1645 if (len <= pi->max_pdu_size) {
1646 control = L2CAP_SDU_UNSEGMENTED;
1647 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1648 if (IS_ERR(skb)) {
1649 err = PTR_ERR(skb);
1650 goto done;
1651 }
1652 __skb_queue_tail(TX_QUEUE(sk), skb);
1653 if (sk->sk_send_head == NULL)
1654 sk->sk_send_head = skb;
1655 } else {
1656 /* Segment SDU into multiples PDUs */
1657 err = l2cap_sar_segment_sdu(sk, msg, len);
1658 if (err < 0)
1659 goto done;
1660 }
1661
1662 if (pi->mode == L2CAP_MODE_STREAMING)
1663 err = l2cap_streaming_send(sk);
1664 else
1665 err = l2cap_ertm_send(sk);
1666
1667 if (!err)
1668 err = len;
1669 break;
1670
1671 default:
1672 BT_DBG("bad state %1.1x", pi->mode);
1673 err = -EINVAL;
1674 }
1675
1676 done:
1677 release_sock(sk);
1678 return err;
1679 }
1680
1681 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1682 {
1683 struct sock *sk = sock->sk;
1684
1685 lock_sock(sk);
1686
1687 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1688 struct l2cap_conn_rsp rsp;
1689
1690 sk->sk_state = BT_CONFIG;
1691
1692 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1693 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1694 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1695 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1696 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1697 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1698
1699 release_sock(sk);
1700 return 0;
1701 }
1702
1703 release_sock(sk);
1704
1705 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1706 }
1707
/* Legacy SOL_L2CAP option setters: L2CAP_OPTIONS (mtu/mode/fcs) and
 * L2CAP_LM (link mode / security mapping).  Returns 0 or -errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with the current values so a short copy from
		 * userspace leaves the remaining fields unchanged */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* NOTE(review): the user-supplied flush_to is read but
		 * never stored back — confirm whether intentional. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode flags onto security levels; the
		 * strongest requested flag wins */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1764
/* SOL_BLUETOOTH option setters (BT_SECURITY, BT_DEFER_SETUP).
 * SOL_L2CAP is routed to the legacy handler.  Returns 0 or -errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Security level applies only to connection-oriented
		 * and raw sockets */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default in case the user buffer is shorter than sec */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket is connected */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1828
/* Legacy SOL_L2CAP option getters: L2CAP_OPTIONS, L2CAP_LM (security
 * level mapped back to link-mode flags) and L2CAP_CONNINFO.
 * Returns 0 or -errno.
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Translate the security level back into the legacy
		 * link-mode bit set */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* Connection info is valid while connected, or during
		 * a deferred setup (BT_CONNECT2 + defer_setup) */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1910
/* SOL_BLUETOOTH option getters (BT_SECURITY, BT_DEFER_SETUP).
 * SOL_L2CAP is routed to the legacy handler.  Returns 0 or -errno.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Same type restriction as the setter */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1964
1965 static int l2cap_sock_shutdown(struct socket *sock, int how)
1966 {
1967 struct sock *sk = sock->sk;
1968 int err = 0;
1969
1970 BT_DBG("sock %p, sk %p", sock, sk);
1971
1972 if (!sk)
1973 return 0;
1974
1975 lock_sock(sk);
1976 if (!sk->sk_shutdown) {
1977 sk->sk_shutdown = SHUTDOWN_MASK;
1978 l2cap_sock_clear_timer(sk);
1979 __l2cap_sock_close(sk, 0);
1980
1981 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1982 err = bt_sock_wait_state(sk, BT_CLOSED,
1983 sk->sk_lingertime);
1984 }
1985 release_sock(sk);
1986 return err;
1987 }
1988
1989 static int l2cap_sock_release(struct socket *sock)
1990 {
1991 struct sock *sk = sock->sk;
1992 int err;
1993
1994 BT_DBG("sock %p, sk %p", sock, sk);
1995
1996 if (!sk)
1997 return 0;
1998
1999 err = l2cap_sock_shutdown(sock, 2);
2000
2001 sock_orphan(sk);
2002 l2cap_sock_kill(sk);
2003 return err;
2004 }
2005
2006 static void l2cap_chan_ready(struct sock *sk)
2007 {
2008 struct sock *parent = bt_sk(sk)->parent;
2009
2010 BT_DBG("sk %p, parent %p", sk, parent);
2011
2012 l2cap_pi(sk)->conf_state = 0;
2013 l2cap_sock_clear_timer(sk);
2014
2015 if (!parent) {
2016 /* Outgoing channel.
2017 * Wake up socket sleeping on connect.
2018 */
2019 sk->sk_state = BT_CONNECTED;
2020 sk->sk_state_change(sk);
2021 } else {
2022 /* Incoming channel.
2023 * Wake up socket sleeping on accept.
2024 */
2025 parent->sk_data_ready(parent, 0);
2026 }
2027 }
2028
2029 /* Copy frame to all raw sockets on that connection */
2030 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2031 {
2032 struct l2cap_chan_list *l = &conn->chan_list;
2033 struct sk_buff *nskb;
2034 struct sock *sk;
2035
2036 BT_DBG("conn %p", conn);
2037
2038 read_lock(&l->lock);
2039 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2040 if (sk->sk_type != SOCK_RAW)
2041 continue;
2042
2043 /* Don't send frame to the socket it came from */
2044 if (skb->sk == sk)
2045 continue;
2046 nskb = skb_clone(skb, GFP_ATOMIC);
2047 if (!nskb)
2048 continue;
2049
2050 if (sock_queue_rcv_skb(sk, nskb))
2051 kfree_skb(nskb);
2052 }
2053 read_unlock(&l->lock);
2054 }
2055
2056 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header (CID 0x0001) + command
 * header + dlen bytes of payload.  Payload exceeding the connection
 * MTU is carried in continuation fragments chained on frag_list.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First chunk of payload goes into the head skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
2115
/* Decode one configuration option at *ptr and advance *ptr past it.
 * type/olen receive the option header; 1/2/4-byte values are returned
 * in *val by value, anything longer as a pointer into the buffer.
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Larger options are handed back by reference */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2148
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored little-endian;
 * wider values are copied from the buffer val points at.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* val is a pointer to the option payload in this case */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2178
/* Reset per-channel ERTM sequence state and set up the retransmission
 * and monitor timers plus the SREJ queue.  Called when an ERTM channel
 * becomes operational.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_to_ack = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
}
2193
2194 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2195 {
2196 u32 local_feat_mask = l2cap_feat_mask;
2197 if (enable_ertm)
2198 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2199
2200 switch (mode) {
2201 case L2CAP_MODE_ERTM:
2202 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2203 case L2CAP_MODE_STREAMING:
2204 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2205 default:
2206 return 0x00;
2207 }
2208 }
2209
2210 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2211 {
2212 switch (mode) {
2213 case L2CAP_MODE_STREAMING:
2214 case L2CAP_MODE_ERTM:
2215 if (l2cap_mode_supported(mode, remote_feat_mask))
2216 return mode;
2217 /* fall through */
2218 default:
2219 return L2CAP_MODE_BASIC;
2220 }
2221 }
2222
/* Build our outgoing Configure Request in data: MTU option for basic
 * mode, an RFC (and possibly FCS) option for ERTM/streaming.  On the
 * first request the channel mode is also settled here.  Returns the
 * request length.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)negotiated on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was explicitly requested: it is not negotiable */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise a non-default receive MTU */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = max_transmit;
		/* Timeouts are dictated by the receiving side */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when allowed */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2306
/* Parse the peer's Configure Request (stored in pi->conf_req) and
 * build our Configure Response in data.  Settles the channel mode on
 * the first exchange and echoes back the options we accept or reject.
 * Returns the response length, or -ECONNREFUSED when no agreeable
 * mode exists.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: collect the options the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			/* Hint options may be ignored; anything else
			 * unknown is reported back to the peer */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only (re)negotiated on the very first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Locally requested mode is not negotiable */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* Counter-propose our mode; refuse outright if the
		 * peer already rejected it once */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->max_pdu_size = rfc.max_pdu_size;

			/* We dictate the timeouts the peer must use */
			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2449
2450 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2451 {
2452 struct l2cap_pinfo *pi = l2cap_pi(sk);
2453 struct l2cap_conf_req *req = data;
2454 void *ptr = req->data;
2455 int type, olen;
2456 unsigned long val;
2457 struct l2cap_conf_rfc rfc;
2458
2459 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2460
2461 while (len >= L2CAP_CONF_OPT_SIZE) {
2462 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2463
2464 switch (type) {
2465 case L2CAP_CONF_MTU:
2466 if (val < L2CAP_DEFAULT_MIN_MTU) {
2467 *result = L2CAP_CONF_UNACCEPT;
2468 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2469 } else
2470 pi->omtu = val;
2471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2472 break;
2473
2474 case L2CAP_CONF_FLUSH_TO:
2475 pi->flush_to = val;
2476 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2477 2, pi->flush_to);
2478 break;
2479
2480 case L2CAP_CONF_RFC:
2481 if (olen == sizeof(rfc))
2482 memcpy(&rfc, (void *)val, olen);
2483
2484 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2485 rfc.mode != pi->mode)
2486 return -ECONNREFUSED;
2487
2488 pi->mode = rfc.mode;
2489 pi->fcs = 0;
2490
2491 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2492 sizeof(rfc), (unsigned long) &rfc);
2493 break;
2494 }
2495 }
2496
2497 if (*result == L2CAP_CONF_SUCCESS) {
2498 switch (rfc.mode) {
2499 case L2CAP_MODE_ERTM:
2500 pi->remote_tx_win = rfc.txwin_size;
2501 pi->retrans_timeout = rfc.retrans_timeout;
2502 pi->monitor_timeout = rfc.monitor_timeout;
2503 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2504 break;
2505 case L2CAP_MODE_STREAMING:
2506 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2507 break;
2508 }
2509 }
2510
2511 req->dcid = cpu_to_le16(pi->dcid);
2512 req->flags = cpu_to_le16(0x0000);
2513
2514 return ptr - data;
2515 }
2516
2517 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2518 {
2519 struct l2cap_conf_rsp *rsp = data;
2520 void *ptr = rsp->data;
2521
2522 BT_DBG("sk %p", sk);
2523
2524 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2525 rsp->result = cpu_to_le16(result);
2526 rsp->flags = cpu_to_le16(flags);
2527
2528 return ptr - data;
2529 }
2530
/* Handle an incoming L2CAP Command Reject.  If the peer rejected our
 * pending Information Request, stop waiting for the feature mask and
 * proceed with connection setup anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	/* Only reason 0x0000 ("command not understood") is handled. */
	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		/* Mark the feature-mask exchange finished and kick off any
		 * channels that were waiting for it. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2550
/* Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, perform security and resource checks, create a child
 * socket for the new channel and send a Connection Response.  A pending
 * response also triggers an Information Request for the feature mask if
 * that exchange has not completed yet.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm.  NOTE(review): the
	 * bh_unlock_sock(parent) at "response:" implies the lookup returns
	 * the parent locked — confirm in l2cap_get_sock_by_psm(). */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP; PSM 0x0001 is SDP
	 * and is exempt from the link-mode check). */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid; a duplicate is a
	 * protocol violation, so discard the freshly allocated socket. */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the listening parent and record the
	 * peer's addresses and channel identifiers. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we complete. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not done yet: answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* "Pending / no further information" means we still need the peer's
	 * feature mask: start the Information Request exchange now. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2667
/* Handle an incoming L2CAP Connection Response for a channel we
 * initiated.  On success the channel enters the configuration phase and
 * we immediately send our Configure Request; any result other than
 * success/pending tears the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A response may carry scid 0 (e.g. a refusal before a channel was
	 * assigned); fall back to matching by the command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: record the peer's CID and kick off
		 * configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Any other result is a refusal. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2718
/* Handle an incoming L2CAP Configure Request.  Fragments (continuation
 * flag set) are accumulated in conf_req; once the request is complete
 * it is parsed, a response is sent, and — if both directions have
 * finished configuring — the channel is moved to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Channel is already going down: ignore the request. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag set: more fragments follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unworkable options: give up on the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both sides configured: enable FCS unless the peer opted
		 * out and we agreed. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We have not sent our own Configure Request yet: do so now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2804
/* Handle an incoming L2CAP Configure Response.  Unacceptable-parameter
 * responses are renegotiated a bounded number of times; anything else
 * that is not a success disconnects the channel.  When both directions
 * have finished configuring the channel becomes BT_CONNECTED.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate with the peer's counter-proposal, but only up
		 * to L2CAP_CONF_MAX_CONF_RSP times. */
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			int len = cmd->len - sizeof(*rsp);
			char req[64];

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Renegotiation limit reached — fall through to disconnect. */

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk);
		goto done;
	}

	/* More response fragments follow; wait for the final one. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Enable FCS unless the peer opted out and we agreed. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
2880
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response, purge pending transmit state and remove the
 * channel with ECONNRESET.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid — look the channel up by it. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra retransmission state and timers — tear those
	 * down as well. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2917
/* Handle an incoming L2CAP Disconnection Response to a request we sent:
 * purge pending transmit state and remove the channel without an error.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra retransmission state and timers. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	/* Clean shutdown: no error reported to the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2947
/* Handle an incoming L2CAP Information Request and answer with our
 * feature mask, the fixed-channel map, or "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when the module option
		 * enabling them is set. */
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed channel bitmap follows the 4-byte header. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2987
/* Handle an incoming L2CAP Information Response.  After receiving the
 * feature mask we optionally query the fixed-channel map; once the
 * exchange is complete, pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels — ask which ones. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			/* Nothing more to query: setup can proceed. */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3026
/* Dispatch all signaling commands contained in an skb received on the
 * L2CAP signaling channel.  Multiple commands may be packed into one
 * frame; a handler error is answered with a Command Reject.  Consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining data, or with the
		 * reserved identifier 0, is malformed: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the received payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next packed command, if any. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3116
/* Verify the CRC16 FCS trailer of a received frame, if FCS is enabled
 * on the channel.  Returns 0 when valid (or when FCS is off), -EINVAL
 * on a checksum mismatch.  Side effect: trims the 2-byte FCS off the
 * skb when FCS is in use.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trimming only shortens skb->len; the FCS bytes are still
		 * present in the buffer at skb->data + skb->len, so they can
		 * be read right after the trim. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header and control field that
		 * were already pulled off, hence the negative offset. */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
3132
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq so reassembly can later drain it in order.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash the sequence number and SAR bits in the skb control block
	 * for later reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk the queue and insert before the first frame with a larger
	 * sequence number. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest sequence number seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
3159
/* Reassemble a segmented SDU from incoming I-frames according to the
 * SAR bits in 'control'.  Unsegmented frames are queued directly; a
 * START frame allocates the reassembly buffer, CONTINUE appends, and
 * END delivers the completed SDU to the socket.  Consumes 'skb'.
 * Returns 0 on success, negative errno otherwise.
 */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a reassembly is a
		 * protocol error: drop the partial SDU. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		/* A new START while already reassembling is an error. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes of a START frame carry the total
		 * SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		/* Drop the SDU if it grew beyond the advertised length. */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		/* Deliver only a complete SDU whose size matches exactly. */
		if (pi->partial_sdu_len == pi->sdu_len) {
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		kfree_skb(pi->sdu);
		err = 0;

		break;
	}

	kfree_skb(skb);
	return err;
}
3239
/* Drain the SREJ queue starting at tx_seq: deliver every consecutively
 * sequenced buffered frame to reassembly, stopping at the first gap.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control = 0;

	while((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue is sorted; a mismatch means the next frame in
		 * sequence is still missing. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_sar_reassembly_sdu(sk, skb, control);
		/* Sequence numbers are modulo-64. */
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
3257
/* The frame with tx_seq finally arrived: drop its entry from the SREJ
 * list and re-send SREJ S-frames for every entry ahead of it, rotating
 * those entries to the list tail.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: remove it and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Move the still-outstanding entry to the back. */
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3277
3278 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3279 {
3280 struct l2cap_pinfo *pi = l2cap_pi(sk);
3281 struct srej_list *new;
3282 u16 control;
3283
3284 while (tx_seq != pi->expected_tx_seq) {
3285 control = L2CAP_SUPER_SELECT_REJECT;
3286 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3287 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3288 control |= L2CAP_CTRL_POLL;
3289 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3290 }
3291 l2cap_send_sframe(pi, control);
3292
3293 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3294 new->tx_seq = pi->expected_tx_seq++;
3295 list_add_tail(&new->list, SREJ_LIST(sk));
3296 }
3297 pi->expected_tx_seq++;
3298 }
3299
/* Process a received ERTM I-frame.  In-sequence frames are delivered to
 * SAR reassembly; out-of-sequence frames either fill a known SREJ gap
 * or start a new selective-reject recovery.  Periodically acknowledges
 * received frames with an RR S-frame.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* The piggybacked ReqSeq acknowledges our outgoing frames. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: buffer it and drain
			 * any consecutive frames already queued. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps filled: leave SREJ recovery. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* If this frame answers a later outstanding SREJ,
			 * re-issue the ones still ahead of it. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* Unknown out-of-order frame: widen the gap. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	/* Sequence numbers are modulo-64. */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			/* F bit answers our poll: retransmit from the last
			 * acknowledged frame. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK frames with an RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3392
/* Process a received ERTM S-frame (RR, REJ, SREJ or RNR): update the
 * acknowledgement state, drive retransmission, and answer polls with
 * final-bit responses as required by the supervisory function.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polled us: answer with RR carrying the F bit
			 * and our current receive sequence. */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
					(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Response to our poll. */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				/* Retransmit everything not yet acked. */
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			/* Poll answered: stop monitoring, resume retransmit
			 * timing if frames are still outstanding. */
			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain acknowledgement. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			l2cap_ertm_send(sk);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		}
		break;

	case L2CAP_SUPER_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			/* Go-back-N: retransmit from the rejected frame. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			/* Retransmit only the requested frame. */
			l2cap_retransmit_frame(sk, tx_seq);
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Avoid retransmitting a frame we already re-sent in
			 * answer to a poll. */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: stop retransmitting until it recovers. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
3513
/* Deliver a data frame received on a connection-oriented channel to the
 * matching socket, handling it according to the channel mode (basic,
 * ERTM or streaming).  Frames for unknown or non-connected channels,
 * oversized or FCS-corrupted frames are dropped.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 2-byte control field, then compute the payload
		 * length excluding the SAR length field and FCS trailer. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: just resynchronize the
		 * expected sequence number on loss. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	/* sk may be NULL when the cid lookup failed. */
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3622
/* Deliver a frame received on the connectionless channel to the socket
 * bound to the given PSM, dropping it if no suitable socket exists or
 * the frame exceeds the socket's incoming MTU.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk may be NULL when the PSM lookup failed. */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3650
/* Entry point for a complete L2CAP frame received from HCI: validate
 * the basic header and dispatch by channel ID to the signaling handler,
 * the connectionless handler or a data channel.  Consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in the first two
		 * payload bytes. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3684
3685 /* ---- L2CAP interface with lower layer (HCI) ---- */
3686
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Returns the combined link-mode flags of listening sockets bound to
 * the local adapter's address (preferred) or to BDADDR_ANY.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	/* Only ACL links carry L2CAP. */
	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Exact match on the adapter's own address. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener; used only if no exact match. */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3719
3720 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3721 {
3722 struct l2cap_conn *conn;
3723
3724 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3725
3726 if (hcon->type != ACL_LINK)
3727 return 0;
3728
3729 if (!status) {
3730 conn = l2cap_conn_add(hcon, status);
3731 if (conn)
3732 l2cap_conn_ready(conn);
3733 } else
3734 l2cap_conn_del(hcon, bt_err(status));
3735
3736 return 0;
3737 }
3738
3739 static int l2cap_disconn_ind(struct hci_conn *hcon)
3740 {
3741 struct l2cap_conn *conn = hcon->l2cap_data;
3742
3743 BT_DBG("hcon %p", hcon);
3744
3745 if (hcon->type != ACL_LINK || !conn)
3746 return 0x13;
3747
3748 return conn->disc_reason;
3749 }
3750
3751 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3752 {
3753 BT_DBG("hcon %p reason %d", hcon, reason);
3754
3755 if (hcon->type != ACL_LINK)
3756 return 0;
3757
3758 l2cap_conn_del(hcon, bt_err(reason));
3759
3760 return 0;
3761 }
3762
3763 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3764 {
3765 if (sk->sk_type != SOCK_SEQPACKET)
3766 return;
3767
3768 if (encrypt == 0x00) {
3769 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3770 l2cap_sock_clear_timer(sk);
3771 l2cap_sock_set_timer(sk, HZ * 5);
3772 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3773 __l2cap_sock_close(sk, ECONNREFUSED);
3774 } else {
3775 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3776 l2cap_sock_clear_timer(sk);
3777 }
3778 }
3779
/* Security (authentication/encryption) change confirmation from HCI.
 * Walks every channel on the connection and advances its state machine:
 * connected channels re-check encryption, outgoing BT_CONNECT channels
 * send the deferred Connection Request, and incoming BT_CONNECT2
 * channels answer with success or a security-block rejection.
 * Returns 0 always (hci_proto callback convention). */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on their own connect exchange are
		 * handled elsewhere; skip them here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption re-check */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is up: send the Connection Request
				 * that was deferred until now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * socket down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection that was waiting on security:
			 * answer the peer's Connection Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3852
/* Receive ACL data from HCI and reassemble L2CAP frames.
 * ACL_START fragments carry the L2CAP header, from which the total
 * frame length is learned; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero.  Complete frames are
 * handed to l2cap_recv_frame(), which consumes them.  The incoming
 * @skb itself is always freed here (its payload is copied out). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 2-byte length field of the header */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame()
			 * takes ownership of skb, so do not fall through
			 * to drop. */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
		/* Falls through to drop: the fragment was copied, so the
		 * original skb is freed intentionally. */
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the reassembly buffer */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received; ownership of rx_skb is
			 * transferred to l2cap_recv_frame(). */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
3940
3941 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3942 {
3943 struct sock *sk;
3944 struct hlist_node *node;
3945 char *str = buf;
3946
3947 read_lock_bh(&l2cap_sk_list.lock);
3948
3949 sk_for_each(sk, node, &l2cap_sk_list.head) {
3950 struct l2cap_pinfo *pi = l2cap_pi(sk);
3951
3952 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3953 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3954 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3955 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3956 }
3957
3958 read_unlock_bh(&l2cap_sk_list.lock);
3959
3960 return str - buf;
3961 }
3962
3963 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3964
/* Socket-layer operations for L2CAP sockets (PF_BLUETOOTH/BTPROTO_L2CAP).
 * Generic bt_sock_* / sock_no_* helpers fill the slots L2CAP does not
 * implement specially. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
3984
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) creates L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
3990
/* HCI protocol hooks: how the HCI core delivers link events and ACL
 * data to the L2CAP layer. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4001
4002 static int __init l2cap_init(void)
4003 {
4004 int err;
4005
4006 err = proto_register(&l2cap_proto, 0);
4007 if (err < 0)
4008 return err;
4009
4010 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4011 if (err < 0) {
4012 BT_ERR("L2CAP socket registration failed");
4013 goto error;
4014 }
4015
4016 err = hci_register_proto(&l2cap_hci_proto);
4017 if (err < 0) {
4018 BT_ERR("L2CAP protocol registration failed");
4019 bt_sock_unregister(BTPROTO_L2CAP);
4020 goto error;
4021 }
4022
4023 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
4024 BT_ERR("Failed to create L2CAP info file");
4025
4026 BT_INFO("L2CAP ver %s", VERSION);
4027 BT_INFO("L2CAP socket layer initialized");
4028
4029 return 0;
4030
4031 error:
4032 proto_unregister(&l2cap_proto);
4033 return err;
4034 }
4035
/* Module exit: undo everything l2cap_init() set up.  The sysfs file
 * goes first so no new readers appear while unregistering. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4048
/* Intentionally empty.  Exported so that modules which only use L2CAP
 * sockets (and no other symbol from this module) can reference it and
 * thereby trigger automatic loading of the L2CAP module. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4057
/* Module entry and exit points */
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable parameters (variables defined earlier in this file) */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");