Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
/* When non-zero, ERTM and streaming modes are not advertised as
 * supported (see l2cap_mode_supported()). */
int disable_ertm;

/* Locally supported feature mask; ERTM/streaming bits are ORed in at
 * lookup time when not disabled. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue backing deferred "local busy" processing (l2cap_busy_work). */
static struct workqueue_struct *_busy_wq;

struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87
88 }
89
90 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->scid == cid)
96 return c;
97 }
98 return NULL;
99 }
100
101 /* Find channel with given SCID.
102 * Returns locked socket */
103 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 read_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
109 if (c)
110 bh_lock_sock(c->sk);
111 read_unlock(&conn->chan_lock);
112 return c;
113 }
114
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
116 {
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)
121 return c;
122 }
123 return NULL;
124 }
125
126 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127 {
128 struct l2cap_chan *c;
129
130 read_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_ident(conn, ident);
132 if (c)
133 bh_lock_sock(c->sk);
134 read_unlock(&conn->chan_lock);
135 return c;
136 }
137
138 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
139 {
140 u16 cid = L2CAP_CID_DYN_START;
141
142 for (; cid < L2CAP_CID_DYN_END; cid++) {
143 if (!__l2cap_get_chan_by_scid(conn, cid))
144 return cid;
145 }
146
147 return 0;
148 }
149
150 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
151 {
152 struct l2cap_chan *chan;
153
154 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
155 if (!chan)
156 return NULL;
157
158 chan->sk = sk;
159
160 return chan;
161 }
162
/* Release a channel previously obtained from l2cap_chan_alloc(). */
void l2cap_chan_free(struct l2cap_chan *chan)
{
	kfree(chan);
}
167
168 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
169 {
170 struct sock *sk = chan->sk;
171
172 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
173 chan->psm, chan->dcid);
174
175 conn->disc_reason = 0x13;
176
177 chan->conn = conn;
178
179 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
180 if (conn->hcon->type == LE_LINK) {
181 /* LE connection */
182 chan->omtu = L2CAP_LE_DEFAULT_MTU;
183 chan->scid = L2CAP_CID_LE_DATA;
184 chan->dcid = L2CAP_CID_LE_DATA;
185 } else {
186 /* Alloc CID for connection-oriented socket */
187 chan->scid = l2cap_alloc_cid(conn);
188 chan->omtu = L2CAP_DEFAULT_MTU;
189 }
190 } else if (sk->sk_type == SOCK_DGRAM) {
191 /* Connectionless socket */
192 chan->scid = L2CAP_CID_CONN_LESS;
193 chan->dcid = L2CAP_CID_CONN_LESS;
194 chan->omtu = L2CAP_DEFAULT_MTU;
195 } else {
196 /* Raw socket can send/recv signalling messages only */
197 chan->scid = L2CAP_CID_SIGNALING;
198 chan->dcid = L2CAP_CID_SIGNALING;
199 chan->omtu = L2CAP_DEFAULT_MTU;
200 }
201
202 sock_hold(sk);
203
204 list_add(&chan->list, &conn->chan_l);
205 }
206
/* Delete channel.
 * Must be called on the locked socket. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add() */
		__sock_put(sk);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	/* Propagate the deletion reason to the socket layer */
	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Still on a listener's accept queue: detach and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* The queues/timers below are only live once configuration
	 * completed in both directions; skip teardown otherwise. */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* Free outstanding SREJ bookkeeping entries */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
264
265 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
266 {
267 struct sock *sk = chan->sk;
268
269 if (sk->sk_type == SOCK_RAW) {
270 switch (chan->sec_level) {
271 case BT_SECURITY_HIGH:
272 return HCI_AT_DEDICATED_BONDING_MITM;
273 case BT_SECURITY_MEDIUM:
274 return HCI_AT_DEDICATED_BONDING;
275 default:
276 return HCI_AT_NO_BONDING;
277 }
278 } else if (chan->psm == cpu_to_le16(0x0001)) {
279 if (chan->sec_level == BT_SECURITY_LOW)
280 chan->sec_level = BT_SECURITY_SDP;
281
282 if (chan->sec_level == BT_SECURITY_HIGH)
283 return HCI_AT_NO_BONDING_MITM;
284 else
285 return HCI_AT_NO_BONDING;
286 } else {
287 switch (chan->sec_level) {
288 case BT_SECURITY_HIGH:
289 return HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 return HCI_AT_GENERAL_BONDING;
292 default:
293 return HCI_AT_NO_BONDING;
294 }
295 }
296 }
297
298 /* Service level security */
299 static inline int l2cap_check_security(struct l2cap_chan *chan)
300 {
301 struct l2cap_conn *conn = chan->conn;
302 __u8 auth_type;
303
304 auth_type = l2cap_get_auth_type(chan);
305
306 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
307 }
308
309 u8 l2cap_get_ident(struct l2cap_conn *conn)
310 {
311 u8 id;
312
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
317 */
318
319 spin_lock_bh(&conn->lock);
320
321 if (++conn->tx_ident > 128)
322 conn->tx_ident = 1;
323
324 id = conn->tx_ident;
325
326 spin_unlock_bh(&conn->lock);
327
328 return id;
329 }
330
331 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 {
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
334 u8 flags;
335
336 BT_DBG("code 0x%2.2x", code);
337
338 if (!skb)
339 return;
340
341 if (lmp_no_flush_capable(conn->hcon->hdev))
342 flags = ACL_START_NO_FLUSH;
343 else
344 flags = ACL_START;
345
346 hci_send_acl(conn->hcon, skb, flags);
347 }
348
349 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
350 {
351 struct sk_buff *skb;
352 struct l2cap_hdr *lh;
353 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
354 struct l2cap_conn *conn = chan->conn;
355 struct sock *sk = (struct sock *)pi;
356 int count, hlen = L2CAP_HDR_SIZE + 2;
357 u8 flags;
358
359 if (sk->sk_state != BT_CONNECTED)
360 return;
361
362 if (chan->fcs == L2CAP_FCS_CRC16)
363 hlen += 2;
364
365 BT_DBG("chan %p, control 0x%2.2x", chan, control);
366
367 count = min_t(unsigned int, conn->mtu, hlen);
368 control |= L2CAP_CTRL_FRAME_TYPE;
369
370 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
371 control |= L2CAP_CTRL_FINAL;
372 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
373 }
374
375 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
376 control |= L2CAP_CTRL_POLL;
377 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
378 }
379
380 skb = bt_skb_alloc(count, GFP_ATOMIC);
381 if (!skb)
382 return;
383
384 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
385 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
386 lh->cid = cpu_to_le16(chan->dcid);
387 put_unaligned_le16(control, skb_put(skb, 2));
388
389 if (chan->fcs == L2CAP_FCS_CRC16) {
390 u16 fcs = crc16(0, (u8 *)lh, count - 2);
391 put_unaligned_le16(fcs, skb_put(skb, 2));
392 }
393
394 if (lmp_no_flush_capable(conn->hcon->hdev))
395 flags = ACL_START_NO_FLUSH;
396 else
397 flags = ACL_START;
398
399 hci_send_acl(chan->conn->hcon, skb, flags);
400 }
401
402 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
403 {
404 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
405 control |= L2CAP_SUPER_RCV_NOT_READY;
406 chan->conn_state |= L2CAP_CONN_RNR_SENT;
407 } else
408 control |= L2CAP_SUPER_RCV_READY;
409
410 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
411
412 l2cap_send_sframe(chan, control);
413 }
414
415 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
416 {
417 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
418 }
419
/* Push channel establishment forward: send a Connect Request once the
 * peer's feature mask is known and security allows it; otherwise start
 * feature-mask discovery with an Information Request first. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Discovery still in flight: wait for response/timeout. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up on discovery after L2CAP_INFO_TIMEOUT ms
		 * (see l2cap_info_timeout()). */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
454
455 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
456 {
457 u32 local_feat_mask = l2cap_feat_mask;
458 if (!disable_ertm)
459 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
460
461 switch (mode) {
462 case L2CAP_MODE_ERTM:
463 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
464 case L2CAP_MODE_STREAMING:
465 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
466 default:
467 return 0x00;
468 }
469 }
470
471 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
472 {
473 struct sock *sk;
474 struct l2cap_disconn_req req;
475
476 if (!conn)
477 return;
478
479 sk = chan->sk;
480
481 if (chan->mode == L2CAP_MODE_ERTM) {
482 del_timer(&chan->retrans_timer);
483 del_timer(&chan->monitor_timer);
484 del_timer(&chan->ack_timer);
485 }
486
487 req.dcid = cpu_to_le16(chan->dcid);
488 req.scid = cpu_to_le16(chan->scid);
489 l2cap_send_cmd(conn, l2cap_get_ident(conn),
490 L2CAP_DISCONN_REQ, sizeof(req), &req);
491
492 sk->sk_state = BT_DISCONN;
493 sk->sk_err = err;
494 }
495
496 /* ---- L2CAP connections ---- */
497 static void l2cap_conn_start(struct l2cap_conn *conn)
498 {
499 struct l2cap_chan *chan, *tmp;
500
501 BT_DBG("conn %p", conn);
502
503 read_lock(&conn->chan_lock);
504
505 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
506 struct sock *sk = chan->sk;
507
508 bh_lock_sock(sk);
509
510 if (sk->sk_type != SOCK_SEQPACKET &&
511 sk->sk_type != SOCK_STREAM) {
512 bh_unlock_sock(sk);
513 continue;
514 }
515
516 if (sk->sk_state == BT_CONNECT) {
517 struct l2cap_conn_req req;
518
519 if (!l2cap_check_security(chan) ||
520 !__l2cap_no_conn_pending(chan)) {
521 bh_unlock_sock(sk);
522 continue;
523 }
524
525 if (!l2cap_mode_supported(chan->mode,
526 conn->feat_mask)
527 && chan->conf_state &
528 L2CAP_CONF_STATE2_DEVICE) {
529 /* __l2cap_sock_close() calls list_del(chan)
530 * so release the lock */
531 read_unlock_bh(&conn->chan_lock);
532 __l2cap_sock_close(sk, ECONNRESET);
533 read_lock_bh(&conn->chan_lock);
534 bh_unlock_sock(sk);
535 continue;
536 }
537
538 req.scid = cpu_to_le16(chan->scid);
539 req.psm = chan->psm;
540
541 chan->ident = l2cap_get_ident(conn);
542 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
543
544 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
545 sizeof(req), &req);
546
547 } else if (sk->sk_state == BT_CONNECT2) {
548 struct l2cap_conn_rsp rsp;
549 char buf[128];
550 rsp.scid = cpu_to_le16(chan->dcid);
551 rsp.dcid = cpu_to_le16(chan->scid);
552
553 if (l2cap_check_security(chan)) {
554 if (bt_sk(sk)->defer_setup) {
555 struct sock *parent = bt_sk(sk)->parent;
556 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
557 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
558 parent->sk_data_ready(parent, 0);
559
560 } else {
561 sk->sk_state = BT_CONFIG;
562 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
563 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
564 }
565 } else {
566 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
567 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
568 }
569
570 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
571 sizeof(rsp), &rsp);
572
573 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
574 rsp.result != L2CAP_CR_SUCCESS) {
575 bh_unlock_sock(sk);
576 continue;
577 }
578
579 chan->conf_state |= L2CAP_CONF_REQ_SENT;
580 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
581 l2cap_build_conf_req(chan, buf), buf);
582 chan->num_conf_req++;
583 }
584
585 bh_unlock_sock(sk);
586 }
587
588 read_unlock(&conn->chan_lock);
589 }
590
/* Find socket with cid and source bdaddr.
 * Returns the closest match: an exact source-address match wins,
 * otherwise a BDADDR_ANY-bound socket is used as fallback.
 * NOTE(review): the returned socket is NOT locked — the list lock is
 * released before returning and the sock itself is never locked. */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		if (state && sk->sk_state != state)
			continue;

		if (chan->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard candidate (may be NULL). */
	return node ? sk : sk1;
}
622
/* An incoming LE link came up: if an LE data socket is listening,
 * create a child socket + channel for it and wake the listener. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	/* Queue the child on the listener so accept() can pick it up. */
	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

	/* Success path falls through: "clean" only drops the parent lock. */
clean:
	bh_unlock_sock(parent);
}
679
/* The underlying HCI link is fully established: move LE and raw
 * channels straight to BT_CONNECTED, and start the L2CAP handshake for
 * connection-oriented channels waiting in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to a listening LE socket first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* No channel-level handshake on LE. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets need no handshake either. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
715
716 /* Notify sockets that we cannot guaranty reliability anymore */
717 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
718 {
719 struct l2cap_chan *chan;
720
721 BT_DBG("conn %p", conn);
722
723 read_lock(&conn->chan_lock);
724
725 list_for_each_entry(chan, &conn->chan_l, list) {
726 struct sock *sk = chan->sk;
727
728 if (chan->force_reliable)
729 sk->sk_err = err;
730 }
731
732 read_unlock(&conn->chan_lock);
733 }
734
/* Info-request timer: the peer never answered our Information Request.
 * Mark discovery as done (feat_mask stays at its initial 0) so pending
 * channels can proceed anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
744
/* Attach (or return the existing) L2CAP connection state to @hcon.
 * Returns NULL when @status reports a failed HCI connection or the
 * allocation fails. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may carry their own (usually smaller) MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* Feature-mask discovery (and its timeout) applies to BR/EDR only. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
784
/* Tear down the L2CAP state attached to @hcon: delete every channel
 * with error @err, stop the info timer and free the conn structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any pending receive buffer (kfree_skb handles NULL). */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
813
/* Like __l2cap_chan_add() but takes conn->chan_lock for writing (with
 * bottom halves disabled). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
820
821 /* ---- Socket interface ---- */
822
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a BDADDR_ANY-bound socket is used as fallback.  The returned socket
 * is not locked. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		if (state && sk->sk_state != state)
			continue;

		if (chan->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match. */
	return node ? sk : sk1;
}
854
/* Initiate an outgoing connection for @chan: resolve the route, create
 * (or reuse) the ACL/LE link and attach the channel to it.  When the
 * link is already up the L2CAP handshake starts immediately.
 * Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* dcid == L2CAP_CID_LE_DATA marks an LE channel. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets: connected as soon as security
			 * is satisfied; no channel handshake needed. */
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(chan))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
921
/* Sleep (interruptibly, in HZ/5 slices) until all transmitted ERTM
 * frames are acknowledged or the channel loses its connection.  Called
 * with the socket locked; the lock is dropped around each sleep.
 * Returns 0 on success, or a negative errno on signal/socket error. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
953
954 static void l2cap_monitor_timeout(unsigned long arg)
955 {
956 struct l2cap_chan *chan = (void *) arg;
957 struct sock *sk = chan->sk;
958
959 BT_DBG("chan %p", chan);
960
961 bh_lock_sock(sk);
962 if (chan->retry_count >= chan->remote_max_tx) {
963 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
964 bh_unlock_sock(sk);
965 return;
966 }
967
968 chan->retry_count++;
969 __mod_monitor_timer();
970
971 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
972 bh_unlock_sock(sk);
973 }
974
/* Retransmission timer: no acknowledgement arrived in time.  Enter the
 * WAIT_F state and poll the peer (P-bit); from here on the monitor
 * timer supervises the poll (see l2cap_monitor_timeout()). */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
991
992 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
993 {
994 struct sk_buff *skb;
995
996 while ((skb = skb_peek(&chan->tx_q)) &&
997 chan->unacked_frames) {
998 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
999 break;
1000
1001 skb = skb_dequeue(&chan->tx_q);
1002 kfree_skb(skb);
1003
1004 chan->unacked_frames--;
1005 }
1006
1007 if (!chan->unacked_frames)
1008 del_timer(&chan->retrans_timer);
1009 }
1010
1011 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1012 {
1013 struct hci_conn *hcon = chan->conn->hcon;
1014 u16 flags;
1015
1016 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1017
1018 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1019 flags = ACL_START_NO_FLUSH;
1020 else
1021 flags = ACL_START;
1022
1023 hci_send_acl(hcon, skb, flags);
1024 }
1025
1026 void l2cap_streaming_send(struct l2cap_chan *chan)
1027 {
1028 struct sk_buff *skb;
1029 u16 control, fcs;
1030
1031 while ((skb = skb_dequeue(&chan->tx_q))) {
1032 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1033 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1034 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1035
1036 if (chan->fcs == L2CAP_FCS_CRC16) {
1037 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1038 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1039 }
1040
1041 l2cap_do_send(chan, skb);
1042
1043 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1044 }
1045 }
1046
1047 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1048 {
1049 struct sk_buff *skb, *tx_skb;
1050 u16 control, fcs;
1051
1052 skb = skb_peek(&chan->tx_q);
1053 if (!skb)
1054 return;
1055
1056 do {
1057 if (bt_cb(skb)->tx_seq == tx_seq)
1058 break;
1059
1060 if (skb_queue_is_last(&chan->tx_q, skb))
1061 return;
1062
1063 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1064
1065 if (chan->remote_max_tx &&
1066 bt_cb(skb)->retries == chan->remote_max_tx) {
1067 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1068 return;
1069 }
1070
1071 tx_skb = skb_clone(skb, GFP_ATOMIC);
1072 bt_cb(skb)->retries++;
1073 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1074 control &= L2CAP_CTRL_SAR;
1075
1076 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1077 control |= L2CAP_CTRL_FINAL;
1078 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1079 }
1080
1081 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1082 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1083
1084 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1085
1086 if (chan->fcs == L2CAP_FCS_CRC16) {
1087 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1088 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1089 }
1090
1091 l2cap_do_send(chan, tx_skb);
1092 }
1093
1094 int l2cap_ertm_send(struct l2cap_chan *chan)
1095 {
1096 struct sk_buff *skb, *tx_skb;
1097 struct sock *sk = chan->sk;
1098 u16 control, fcs;
1099 int nsent = 0;
1100
1101 if (sk->sk_state != BT_CONNECTED)
1102 return -ENOTCONN;
1103
1104 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1105
1106 if (chan->remote_max_tx &&
1107 bt_cb(skb)->retries == chan->remote_max_tx) {
1108 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1109 break;
1110 }
1111
1112 tx_skb = skb_clone(skb, GFP_ATOMIC);
1113
1114 bt_cb(skb)->retries++;
1115
1116 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1117 control &= L2CAP_CTRL_SAR;
1118
1119 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1120 control |= L2CAP_CTRL_FINAL;
1121 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1122 }
1123 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1124 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1125 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1126
1127
1128 if (chan->fcs == L2CAP_FCS_CRC16) {
1129 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1130 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1131 }
1132
1133 l2cap_do_send(chan, tx_skb);
1134
1135 __mod_retrans_timer();
1136
1137 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1138 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1139
1140 if (bt_cb(skb)->retries == 1)
1141 chan->unacked_frames++;
1142
1143 chan->frames_sent++;
1144
1145 if (skb_queue_is_last(&chan->tx_q, skb))
1146 chan->tx_send_head = NULL;
1147 else
1148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1149
1150 nsent++;
1151 }
1152
1153 return nsent;
1154 }
1155
1156 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1157 {
1158 int ret;
1159
1160 if (!skb_queue_empty(&chan->tx_q))
1161 chan->tx_send_head = chan->tx_q.next;
1162
1163 chan->next_tx_seq = chan->expected_ack_seq;
1164 ret = l2cap_ertm_send(chan);
1165 return ret;
1166 }
1167
1168 static void l2cap_send_ack(struct l2cap_chan *chan)
1169 {
1170 u16 control = 0;
1171
1172 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1173
1174 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1175 control |= L2CAP_SUPER_RCV_NOT_READY;
1176 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1177 l2cap_send_sframe(chan, control);
1178 return;
1179 }
1180
1181 if (l2cap_ertm_send(chan) > 0)
1182 return;
1183
1184 control |= L2CAP_SUPER_RCV_READY;
1185 l2cap_send_sframe(chan, control);
1186 }
1187
1188 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1189 {
1190 struct srej_list *tail;
1191 u16 control;
1192
1193 control = L2CAP_SUPER_SELECT_REJECT;
1194 control |= L2CAP_CTRL_FINAL;
1195
1196 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1197 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1198
1199 l2cap_send_sframe(chan, control);
1200 }
1201
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * @skb itself, the remainder into a chain of continuation fragments
 * (frag_list), each at most conn->mtu bytes.
 * Returns the number of bytes consumed or a negative errno; on error
 * the caller frees @skb, which also releases attached fragments. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1233
1234 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1235 {
1236 struct sock *sk = chan->sk;
1237 struct l2cap_conn *conn = chan->conn;
1238 struct sk_buff *skb;
1239 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1240 struct l2cap_hdr *lh;
1241
1242 BT_DBG("sk %p len %d", sk, (int)len);
1243
1244 count = min_t(unsigned int, (conn->mtu - hlen), len);
1245 skb = bt_skb_send_alloc(sk, count + hlen,
1246 msg->msg_flags & MSG_DONTWAIT, &err);
1247 if (!skb)
1248 return ERR_PTR(err);
1249
1250 /* Create L2CAP header */
1251 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1252 lh->cid = cpu_to_le16(chan->dcid);
1253 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1254 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1255
1256 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1257 if (unlikely(err < 0)) {
1258 kfree_skb(skb);
1259 return ERR_PTR(err);
1260 }
1261 return skb;
1262 }
1263
1264 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1265 {
1266 struct sock *sk = chan->sk;
1267 struct l2cap_conn *conn = chan->conn;
1268 struct sk_buff *skb;
1269 int err, count, hlen = L2CAP_HDR_SIZE;
1270 struct l2cap_hdr *lh;
1271
1272 BT_DBG("sk %p len %d", sk, (int)len);
1273
1274 count = min_t(unsigned int, (conn->mtu - hlen), len);
1275 skb = bt_skb_send_alloc(sk, count + hlen,
1276 msg->msg_flags & MSG_DONTWAIT, &err);
1277 if (!skb)
1278 return ERR_PTR(err);
1279
1280 /* Create L2CAP header */
1281 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1282 lh->cid = cpu_to_le16(chan->dcid);
1283 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1284
1285 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1286 if (unlikely(err < 0)) {
1287 kfree_skb(skb);
1288 return ERR_PTR(err);
1289 }
1290 return skb;
1291 }
1292
1293 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1294 {
1295 struct sock *sk = chan->sk;
1296 struct l2cap_conn *conn = chan->conn;
1297 struct sk_buff *skb;
1298 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1299 struct l2cap_hdr *lh;
1300
1301 BT_DBG("sk %p len %d", sk, (int)len);
1302
1303 if (!conn)
1304 return ERR_PTR(-ENOTCONN);
1305
1306 if (sdulen)
1307 hlen += 2;
1308
1309 if (chan->fcs == L2CAP_FCS_CRC16)
1310 hlen += 2;
1311
1312 count = min_t(unsigned int, (conn->mtu - hlen), len);
1313 skb = bt_skb_send_alloc(sk, count + hlen,
1314 msg->msg_flags & MSG_DONTWAIT, &err);
1315 if (!skb)
1316 return ERR_PTR(err);
1317
1318 /* Create L2CAP header */
1319 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1320 lh->cid = cpu_to_le16(chan->dcid);
1321 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1322 put_unaligned_le16(control, skb_put(skb, 2));
1323 if (sdulen)
1324 put_unaligned_le16(sdulen, skb_put(skb, 2));
1325
1326 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1327 if (unlikely(err < 0)) {
1328 kfree_skb(skb);
1329 return ERR_PTR(err);
1330 }
1331
1332 if (chan->fcs == L2CAP_FCS_CRC16)
1333 put_unaligned_le16(0, skb_put(skb, 2));
1334
1335 bt_cb(skb)->retries = 0;
1336 return skb;
1337 }
1338
1339 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1340 {
1341 struct sk_buff *skb;
1342 struct sk_buff_head sar_queue;
1343 u16 control;
1344 size_t size = 0;
1345
1346 skb_queue_head_init(&sar_queue);
1347 control = L2CAP_SDU_START;
1348 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1349 if (IS_ERR(skb))
1350 return PTR_ERR(skb);
1351
1352 __skb_queue_tail(&sar_queue, skb);
1353 len -= chan->remote_mps;
1354 size += chan->remote_mps;
1355
1356 while (len > 0) {
1357 size_t buflen;
1358
1359 if (len > chan->remote_mps) {
1360 control = L2CAP_SDU_CONTINUE;
1361 buflen = chan->remote_mps;
1362 } else {
1363 control = L2CAP_SDU_END;
1364 buflen = len;
1365 }
1366
1367 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1368 if (IS_ERR(skb)) {
1369 skb_queue_purge(&sar_queue);
1370 return PTR_ERR(skb);
1371 }
1372
1373 __skb_queue_tail(&sar_queue, skb);
1374 len -= buflen;
1375 size += buflen;
1376 }
1377 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1378 if (chan->tx_send_head == NULL)
1379 chan->tx_send_head = sar_queue.next;
1380
1381 return size;
1382 }
1383
1384 static void l2cap_chan_ready(struct sock *sk)
1385 {
1386 struct sock *parent = bt_sk(sk)->parent;
1387 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1388
1389 BT_DBG("sk %p, parent %p", sk, parent);
1390
1391 chan->conf_state = 0;
1392 l2cap_sock_clear_timer(sk);
1393
1394 if (!parent) {
1395 /* Outgoing channel.
1396 * Wake up socket sleeping on connect.
1397 */
1398 sk->sk_state = BT_CONNECTED;
1399 sk->sk_state_change(sk);
1400 } else {
1401 /* Incoming channel.
1402 * Wake up socket sleeping on accept.
1403 */
1404 parent->sk_data_ready(parent, 0);
1405 }
1406 }
1407
1408 /* Copy frame to all raw sockets on that connection */
1409 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1410 {
1411 struct sk_buff *nskb;
1412 struct l2cap_chan *chan;
1413
1414 BT_DBG("conn %p", conn);
1415
1416 read_lock(&conn->chan_lock);
1417 list_for_each_entry(chan, &conn->chan_l, list) {
1418 struct sock *sk = chan->sk;
1419 if (sk->sk_type != SOCK_RAW)
1420 continue;
1421
1422 /* Don't send frame to the socket it came from */
1423 if (skb->sk == sk)
1424 continue;
1425 nskb = skb_clone(skb, GFP_ATOMIC);
1426 if (!nskb)
1427 continue;
1428
1429 if (sock_queue_rcv_skb(sk, nskb))
1430 kfree_skb(nskb);
1431 }
1432 read_unlock(&conn->chan_lock);
1433 }
1434
1435 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header + command header +
 * @dlen bytes of payload from @data.  Payload that does not fit into
 * the first conn->mtu bytes is chained as continuation fragments on
 * the skb's frag_list.  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID which differs on LE links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Only the payload bytes remaining after both headers
		 * fit in the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
1498
1499 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1500 {
1501 struct l2cap_conf_opt *opt = *ptr;
1502 int len;
1503
1504 len = L2CAP_CONF_OPT_SIZE + opt->len;
1505 *ptr += len;
1506
1507 *type = opt->type;
1508 *olen = opt->len;
1509
1510 switch (opt->len) {
1511 case 1:
1512 *val = *((u8 *) opt->val);
1513 break;
1514
1515 case 2:
1516 *val = get_unaligned_le16(opt->val);
1517 break;
1518
1519 case 4:
1520 *val = get_unaligned_le32(opt->val);
1521 break;
1522
1523 default:
1524 *val = (unsigned long) opt->val;
1525 break;
1526 }
1527
1528 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1529 return len;
1530 }
1531
1532 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1533 {
1534 struct l2cap_conf_opt *opt = *ptr;
1535
1536 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1537
1538 opt->type = type;
1539 opt->len = len;
1540
1541 switch (len) {
1542 case 1:
1543 *((u8 *) opt->val) = val;
1544 break;
1545
1546 case 2:
1547 put_unaligned_le16(val, opt->val);
1548 break;
1549
1550 case 4:
1551 put_unaligned_le32(val, opt->val);
1552 break;
1553
1554 default:
1555 memcpy(opt->val, (void *) val, len);
1556 break;
1557 }
1558
1559 *ptr += L2CAP_CONF_OPT_SIZE + len;
1560 }
1561
1562 static void l2cap_ack_timeout(unsigned long arg)
1563 {
1564 struct l2cap_chan *chan = (void *) arg;
1565
1566 bh_lock_sock(chan->sk);
1567 l2cap_send_ack(chan);
1568 bh_unlock_sock(chan->sk);
1569 }
1570
/* Initialize ERTM state on a channel: reset the sequence counters, arm
 * the retransmission/monitor/ack timers, and set up the SREJ and busy
 * handling machinery.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* ERTM frames are processed through the socket backlog path */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1596
1597 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1598 {
1599 switch (mode) {
1600 case L2CAP_MODE_STREAMING:
1601 case L2CAP_MODE_ERTM:
1602 if (l2cap_mode_supported(mode, remote_feat_mask))
1603 return mode;
1604 /* fall through */
1605 default:
1606 return L2CAP_MODE_BASIC;
1607 }
1608 }
1609
/* Build our outgoing configure request for @chan into @data.
 *
 * On the very first exchange the channel mode may be downgraded based
 * on the remote's feature mask; subsequent requests keep whatever was
 * already negotiated.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE: the mode was pinned and must not change */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise an MTU that differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* No RFC option needed when the remote knows neither
		 * ERTM nor streaming mode */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = chan->tx_win;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full I-frame fits in the ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Propose disabling the FCS if neither side wants it */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Same ACL MTU cap as in ERTM mode */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1706
1707 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1708 {
1709 struct l2cap_conf_rsp *rsp = data;
1710 void *ptr = rsp->data;
1711 void *req = chan->conf_req;
1712 int len = chan->conf_len;
1713 int type, hint, olen;
1714 unsigned long val;
1715 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1716 u16 mtu = L2CAP_DEFAULT_MTU;
1717 u16 result = L2CAP_CONF_SUCCESS;
1718
1719 BT_DBG("chan %p", chan);
1720
1721 while (len >= L2CAP_CONF_OPT_SIZE) {
1722 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1723
1724 hint = type & L2CAP_CONF_HINT;
1725 type &= L2CAP_CONF_MASK;
1726
1727 switch (type) {
1728 case L2CAP_CONF_MTU:
1729 mtu = val;
1730 break;
1731
1732 case L2CAP_CONF_FLUSH_TO:
1733 chan->flush_to = val;
1734 break;
1735
1736 case L2CAP_CONF_QOS:
1737 break;
1738
1739 case L2CAP_CONF_RFC:
1740 if (olen == sizeof(rfc))
1741 memcpy(&rfc, (void *) val, olen);
1742 break;
1743
1744 case L2CAP_CONF_FCS:
1745 if (val == L2CAP_FCS_NONE)
1746 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1747
1748 break;
1749
1750 default:
1751 if (hint)
1752 break;
1753
1754 result = L2CAP_CONF_UNKNOWN;
1755 *((u8 *) ptr++) = type;
1756 break;
1757 }
1758 }
1759
1760 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1761 goto done;
1762
1763 switch (chan->mode) {
1764 case L2CAP_MODE_STREAMING:
1765 case L2CAP_MODE_ERTM:
1766 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1767 chan->mode = l2cap_select_mode(rfc.mode,
1768 chan->conn->feat_mask);
1769 break;
1770 }
1771
1772 if (chan->mode != rfc.mode)
1773 return -ECONNREFUSED;
1774
1775 break;
1776 }
1777
1778 done:
1779 if (chan->mode != rfc.mode) {
1780 result = L2CAP_CONF_UNACCEPT;
1781 rfc.mode = chan->mode;
1782
1783 if (chan->num_conf_rsp == 1)
1784 return -ECONNREFUSED;
1785
1786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1787 sizeof(rfc), (unsigned long) &rfc);
1788 }
1789
1790
1791 if (result == L2CAP_CONF_SUCCESS) {
1792 /* Configure output options and let the other side know
1793 * which ones we don't like. */
1794
1795 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1796 result = L2CAP_CONF_UNACCEPT;
1797 else {
1798 chan->omtu = mtu;
1799 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1800 }
1801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1802
1803 switch (rfc.mode) {
1804 case L2CAP_MODE_BASIC:
1805 chan->fcs = L2CAP_FCS_NONE;
1806 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1807 break;
1808
1809 case L2CAP_MODE_ERTM:
1810 chan->remote_tx_win = rfc.txwin_size;
1811 chan->remote_max_tx = rfc.max_transmit;
1812
1813 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1814 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1815
1816 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1817
1818 rfc.retrans_timeout =
1819 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1820 rfc.monitor_timeout =
1821 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1822
1823 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1824
1825 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1826 sizeof(rfc), (unsigned long) &rfc);
1827
1828 break;
1829
1830 case L2CAP_MODE_STREAMING:
1831 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1832 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1833
1834 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1835
1836 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1837
1838 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1839 sizeof(rfc), (unsigned long) &rfc);
1840
1841 break;
1842
1843 default:
1844 result = L2CAP_CONF_UNACCEPT;
1845
1846 memset(&rfc, 0, sizeof(rfc));
1847 rfc.mode = chan->mode;
1848 }
1849
1850 if (result == L2CAP_CONF_SUCCESS)
1851 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1852 }
1853 rsp->scid = cpu_to_le16(chan->dcid);
1854 rsp->result = cpu_to_le16(result);
1855 rsp->flags = cpu_to_le16(0x0000);
1856
1857 return ptr - data;
1858 }
1859
1860 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1861 {
1862 struct l2cap_conf_req *req = data;
1863 void *ptr = req->data;
1864 int type, olen;
1865 unsigned long val;
1866 struct l2cap_conf_rfc rfc;
1867
1868 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1869
1870 while (len >= L2CAP_CONF_OPT_SIZE) {
1871 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1872
1873 switch (type) {
1874 case L2CAP_CONF_MTU:
1875 if (val < L2CAP_DEFAULT_MIN_MTU) {
1876 *result = L2CAP_CONF_UNACCEPT;
1877 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1878 } else
1879 chan->imtu = val;
1880 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1881 break;
1882
1883 case L2CAP_CONF_FLUSH_TO:
1884 chan->flush_to = val;
1885 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1886 2, chan->flush_to);
1887 break;
1888
1889 case L2CAP_CONF_RFC:
1890 if (olen == sizeof(rfc))
1891 memcpy(&rfc, (void *)val, olen);
1892
1893 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1894 rfc.mode != chan->mode)
1895 return -ECONNREFUSED;
1896
1897 chan->fcs = 0;
1898
1899 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1900 sizeof(rfc), (unsigned long) &rfc);
1901 break;
1902 }
1903 }
1904
1905 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1906 return -ECONNREFUSED;
1907
1908 chan->mode = rfc.mode;
1909
1910 if (*result == L2CAP_CONF_SUCCESS) {
1911 switch (rfc.mode) {
1912 case L2CAP_MODE_ERTM:
1913 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1914 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1915 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1916 break;
1917 case L2CAP_MODE_STREAMING:
1918 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1919 }
1920 }
1921
1922 req->dcid = cpu_to_le16(chan->dcid);
1923 req->flags = cpu_to_le16(0x0000);
1924
1925 return ptr - data;
1926 }
1927
1928 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1929 {
1930 struct l2cap_conf_rsp *rsp = data;
1931 void *ptr = rsp->data;
1932
1933 BT_DBG("chan %p", chan);
1934
1935 rsp->scid = cpu_to_le16(chan->dcid);
1936 rsp->result = cpu_to_le16(result);
1937 rsp->flags = cpu_to_le16(flags);
1938
1939 return ptr - data;
1940 }
1941
1942 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
1943 {
1944 struct l2cap_conn_rsp rsp;
1945 struct l2cap_conn *conn = chan->conn;
1946 u8 buf[128];
1947
1948 rsp.scid = cpu_to_le16(chan->dcid);
1949 rsp.dcid = cpu_to_le16(chan->scid);
1950 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1951 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1952 l2cap_send_cmd(conn, chan->ident,
1953 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1954
1955 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
1956 return;
1957
1958 chan->conf_state |= L2CAP_CONF_REQ_SENT;
1959 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1960 l2cap_build_conf_req(chan, buf), buf);
1961 chan->num_conf_req++;
1962 }
1963
1964 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
1965 {
1966 int type, olen;
1967 unsigned long val;
1968 struct l2cap_conf_rfc rfc;
1969
1970 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
1971
1972 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
1973 return;
1974
1975 while (len >= L2CAP_CONF_OPT_SIZE) {
1976 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1977
1978 switch (type) {
1979 case L2CAP_CONF_RFC:
1980 if (olen == sizeof(rfc))
1981 memcpy(&rfc, (void *)val, olen);
1982 goto done;
1983 }
1984 }
1985
1986 done:
1987 switch (rfc.mode) {
1988 case L2CAP_MODE_ERTM:
1989 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1990 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1991 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1992 break;
1993 case L2CAP_MODE_STREAMING:
1994 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1995 }
1996 }
1997
1998 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1999 {
2000 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2001
2002 if (rej->reason != 0x0000)
2003 return 0;
2004
2005 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2006 cmd->ident == conn->info_ident) {
2007 del_timer(&conn->info_timer);
2008
2009 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2010 conn->info_ident = 0;
2011
2012 l2cap_conn_start(conn);
2013 }
2014
2015 return 0;
2016 }
2017
/* Handle an incoming connection request: find a listening socket for
 * the requested PSM, create a child socket/channel, run the security
 * checks and answer with a connect response (possibly "pending").
 * May also kick off the feature-mask info exchange on a fresh link.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	/* 0x0001 is the SDP PSM; 0x05 below is "security block" */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace will accept/reject later */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange still outstanding */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First connection on this link: start feature discovery */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2156
/* Handle an incoming connect response for one of our requests and move
 * the channel into configuration, pending, or tear it down on failure.
 *
 * NOTE(review): the socket is unlocked at the bottom without a visible
 * bh_lock_sock() here — the l2cap_get_chan_by_scid/_by_ident helpers
 * are assumed to return with the channel's socket bh-locked; confirm
 * against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the peer could not allocate one; fall back
	 * to matching by command ident */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Send our configure request exactly once */
		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2221
2222 static inline void set_default_fcs(struct l2cap_chan *chan)
2223 {
2224 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2225
2226 /* FCS is enabled only in ERTM or streaming mode, if one or both
2227 * sides request it.
2228 */
2229 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2230 chan->fcs = L2CAP_FCS_NONE;
2231 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2232 chan->fcs = L2CAP_FCS_CRC16;
2233 }
2234
/* Handle an incoming configure request: accumulate option data across
 * continuation packets, then parse the complete request, send our
 * response, and bring the channel up once both directions are done.
 *
 * NOTE(review): l2cap_get_chan_by_scid is assumed to return with the
 * channel's socket bh-locked (all paths exit through bh_unlock_sock).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Config is only legal in the BT_CONFIG state; 0x0002 is the
	 * "invalid CID" command-reject reason */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	/* Flag bit 0 set means more option data follows */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered theirs; now send our own request if we haven't */
	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2328
2329 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2330 {
2331 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2332 u16 scid, flags, result;
2333 struct l2cap_chan *chan;
2334 struct sock *sk;
2335 int len = cmd->len - sizeof(*rsp);
2336
2337 scid = __le16_to_cpu(rsp->scid);
2338 flags = __le16_to_cpu(rsp->flags);
2339 result = __le16_to_cpu(rsp->result);
2340
2341 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2342 scid, flags, result);
2343
2344 chan = l2cap_get_chan_by_scid(conn, scid);
2345 if (!chan)
2346 return 0;
2347
2348 sk = chan->sk;
2349
2350 switch (result) {
2351 case L2CAP_CONF_SUCCESS:
2352 l2cap_conf_rfc_get(chan, rsp->data, len);
2353 break;
2354
2355 case L2CAP_CONF_UNACCEPT:
2356 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2357 char req[64];
2358
2359 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2360 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2361 goto done;
2362 }
2363
2364 /* throw out any old stored conf requests */
2365 result = L2CAP_CONF_SUCCESS;
2366 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2367 req, &result);
2368 if (len < 0) {
2369 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2370 goto done;
2371 }
2372
2373 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2374 L2CAP_CONF_REQ, len, req);
2375 chan->num_conf_req++;
2376 if (result != L2CAP_CONF_SUCCESS)
2377 goto done;
2378 break;
2379 }
2380
2381 default:
2382 sk->sk_err = ECONNRESET;
2383 l2cap_sock_set_timer(sk, HZ * 5);
2384 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2385 goto done;
2386 }
2387
2388 if (flags & 0x01)
2389 goto done;
2390
2391 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2392
2393 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2394 set_default_fcs(chan);
2395
2396 sk->sk_state = BT_CONNECTED;
2397 chan->next_tx_seq = 0;
2398 chan->expected_tx_seq = 0;
2399 skb_queue_head_init(&chan->tx_q);
2400 if (chan->mode == L2CAP_MODE_ERTM)
2401 l2cap_ertm_init(chan);
2402
2403 l2cap_chan_ready(sk);
2404 }
2405
2406 done:
2407 bh_unlock_sock(sk);
2408 return 0;
2409 }
2410
/* Handle an incoming disconnect request: acknowledge it, shut the
 * socket down and tear the channel down (deferred via a short timer if
 * userspace currently owns the socket).
 *
 * NOTE(review): l2cap_get_chan_by_scid is assumed to return with the
 * channel's socket bh-locked (both exit paths unlock it).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2451
/* Handle an incoming disconnect response for a disconnect we sent and
 * finish tearing the channel down (deferred via a short timer if
 * userspace currently owns the socket).
 *
 * NOTE(review): l2cap_get_chan_by_scid is assumed to return with the
 * channel's socket bh-locked (both exit paths unlock it).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* Clean close: no error reported to the socket */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2485
2486 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2487 {
2488 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2489 u16 type;
2490
2491 type = __le16_to_cpu(req->type);
2492
2493 BT_DBG("type 0x%4.4x", type);
2494
2495 if (type == L2CAP_IT_FEAT_MASK) {
2496 u8 buf[8];
2497 u32 feat_mask = l2cap_feat_mask;
2498 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2499 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2500 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2501 if (!disable_ertm)
2502 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2503 | L2CAP_FEAT_FCS;
2504 put_unaligned_le32(feat_mask, rsp->data);
2505 l2cap_send_cmd(conn, cmd->ident,
2506 L2CAP_INFO_RSP, sizeof(buf), buf);
2507 } else if (type == L2CAP_IT_FIXED_CHAN) {
2508 u8 buf[12];
2509 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2510 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2512 memcpy(buf + 4, l2cap_fixed_chan, 8);
2513 l2cap_send_cmd(conn, cmd->ident,
2514 L2CAP_INFO_RSP, sizeof(buf), buf);
2515 } else {
2516 struct l2cap_info_rsp rsp;
2517 rsp.type = cpu_to_le16(type);
2518 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2521 }
2522
2523 return 0;
2524 }
2525
/* Handle an Information Response during feature discovery.
 *
 * A successful FEAT_MASK response may trigger a follow-up FIXED_CHAN
 * request; once discovery completes (or the peer refuses) the pending
 * connection requests are kicked via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery and continue with
		 * whatever defaults we have. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue discovery with the fixed channel map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2578
2579 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2580 u16 to_multiplier)
2581 {
2582 u16 max_latency;
2583
2584 if (min > max || min < 6 || max > 3200)
2585 return -EINVAL;
2586
2587 if (to_multiplier < 10 || to_multiplier > 3200)
2588 return -EINVAL;
2589
2590 if (max >= to_multiplier * 8)
2591 return -EINVAL;
2592
2593 max_latency = (to_multiplier * 8 / max) - 1;
2594 if (latency > 499 || latency > max_latency)
2595 return -EINVAL;
2596
2597 return 0;
2598 }
2599
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are master of the link.  The parameters are
 * validated with l2cap_check_conn_param(); a response is sent either
 * way, and accepted parameters are pushed down to the controller.
 *
 * Returns 0 on success, -EINVAL when we are not master, -EPROTO on a
 * malformed request (caller then emits a Command Reject).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may apply connection parameter updates. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2641
2642 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2643 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2644 {
2645 int err = 0;
2646
2647 switch (cmd->code) {
2648 case L2CAP_COMMAND_REJ:
2649 l2cap_command_rej(conn, cmd, data);
2650 break;
2651
2652 case L2CAP_CONN_REQ:
2653 err = l2cap_connect_req(conn, cmd, data);
2654 break;
2655
2656 case L2CAP_CONN_RSP:
2657 err = l2cap_connect_rsp(conn, cmd, data);
2658 break;
2659
2660 case L2CAP_CONF_REQ:
2661 err = l2cap_config_req(conn, cmd, cmd_len, data);
2662 break;
2663
2664 case L2CAP_CONF_RSP:
2665 err = l2cap_config_rsp(conn, cmd, data);
2666 break;
2667
2668 case L2CAP_DISCONN_REQ:
2669 err = l2cap_disconnect_req(conn, cmd, data);
2670 break;
2671
2672 case L2CAP_DISCONN_RSP:
2673 err = l2cap_disconnect_rsp(conn, cmd, data);
2674 break;
2675
2676 case L2CAP_ECHO_REQ:
2677 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2678 break;
2679
2680 case L2CAP_ECHO_RSP:
2681 break;
2682
2683 case L2CAP_INFO_REQ:
2684 err = l2cap_information_req(conn, cmd, data);
2685 break;
2686
2687 case L2CAP_INFO_RSP:
2688 err = l2cap_information_rsp(conn, cmd, data);
2689 break;
2690
2691 default:
2692 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2693 err = -EINVAL;
2694 break;
2695 }
2696
2697 return err;
2698 }
2699
2700 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2701 struct l2cap_cmd_hdr *cmd, u8 *data)
2702 {
2703 switch (cmd->code) {
2704 case L2CAP_COMMAND_REJ:
2705 return 0;
2706
2707 case L2CAP_CONN_PARAM_UPDATE_REQ:
2708 return l2cap_conn_param_update_req(conn, cmd, data);
2709
2710 case L2CAP_CONN_PARAM_UPDATE_RSP:
2711 return 0;
2712
2713 default:
2714 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2715 return -EINVAL;
2716 }
2717 }
2718
/* Parse and dispatch every signaling command packed into one C-frame.
 *
 * Commands are dispatched to the BR/EDR or LE handler depending on the
 * link type; a failing handler is answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Hand the frame to l2cap_raw_recv() first (presumably raw
	 * socket mirroring -- defined elsewhere). */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than what is left in the frame, or with
		 * the reserved ident 0, means corruption: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next packed command. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2765
/* Verify and strip the 16-bit FCS trailer of an ERTM/Streaming frame.
 *
 * The CRC covers the L2CAP basic header plus the 2-byte control field,
 * both of which the callers have already pulled off skb->data -- hence
 * the negative offset.  When FCS is disabled on the channel the frame
 * passes through unchanged.
 *
 * Returns 0 when acceptable, -EBADMSG on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* Bytes in front of skb->data covered by the CRC. */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the payload; the two octets remain
		 * readable just past the new tail. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
2781
/* Answer a poll (P-bit) request: send pending I-frames, or an RR/RNR
 * S-frame when nothing else communicates our receiver state.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* We cannot accept data: advertise Receiver Not Ready. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* If no I-frame went out (and we are not busy), an explicit
	 * Receiver Ready is still needed to answer the poll. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2807
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's tx_seq distance from buffer_seq.
 *
 * Returns 0 on insertion, -EINVAL when a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distances are computed modulo the 64-entry sequence space so
	 * wrap-around compares correctly. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			/* First frame logically after ours: insert here. */
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Larger than everything queued so far: append. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2849
/* Reassemble an ERTM SDU from its SAR fragments and deliver complete
 * SDUs to the socket receive queue.
 *
 * @control carries the SAR bits of the I-frame.  A negative return
 * (-ENOMEM or a socket queueing error) means the skb was NOT consumed:
 * the caller keeps it and retries later (local-busy machinery).  The
 * SAR_RETRY flag records that the tail bytes of an END fragment were
 * already appended, so a retry only repeats the delivery step.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* No reassembly may be in progress. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		/* Queueing failed: the frame is freed below. */
		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two octets announce the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry the tail bytes were already appended by a
		 * previous (failed) pass; skip straight to delivery. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* fall through: SAR protocol violations tear the link down */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2956
/* Flush frames parked on busy_q while we were locally busy.
 *
 * Returns 0 when the queue drained and the local-busy state cleared,
 * -EBUSY when the socket still cannot take data (the failing frame is
 * put back at the head of busy_q for the next retry).
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: keep the frame for the next retry. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer RNR earlier: poll with RR to restart its
	 * transmissions and wait for the F-bit answer. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
2995
/* Workqueue handler for the local-busy state.
 *
 * Periodically retries pushing parked frames to the socket until the
 * queue drains, a signal or socket error occurs, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted -- the latter disconnects the
 * channel.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the reader can drain
		 * the receive buffer in the meantime. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3042
/* Deliver an in-sequence I-frame, entering the local-busy state when
 * the socket cannot accept it.
 *
 * Returns the reassembly result; a negative value means the frame was
 * parked on busy_q and the busy machinery (RNR S-frame plus the retry
 * worker) was started.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: queue behind earlier parked frames so the
		 * flush path preserves ordering. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);
	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Tell the peer to stop sending (Receiver Not Ready). */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3080
3081 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3082 {
3083 struct sk_buff *_skb;
3084 int err = -EINVAL;
3085
3086 /*
3087 * TODO: We have to notify the userland if some data is lost with the
3088 * Streaming Mode.
3089 */
3090
3091 switch (control & L2CAP_CTRL_SAR) {
3092 case L2CAP_SDU_UNSEGMENTED:
3093 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3094 kfree_skb(chan->sdu);
3095 break;
3096 }
3097
3098 err = sock_queue_rcv_skb(chan->sk, skb);
3099 if (!err)
3100 return 0;
3101
3102 break;
3103
3104 case L2CAP_SDU_START:
3105 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3106 kfree_skb(chan->sdu);
3107 break;
3108 }
3109
3110 chan->sdu_len = get_unaligned_le16(skb->data);
3111 skb_pull(skb, 2);
3112
3113 if (chan->sdu_len > chan->imtu) {
3114 err = -EMSGSIZE;
3115 break;
3116 }
3117
3118 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3119 if (!chan->sdu) {
3120 err = -ENOMEM;
3121 break;
3122 }
3123
3124 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3125
3126 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3127 chan->partial_sdu_len = skb->len;
3128 err = 0;
3129 break;
3130
3131 case L2CAP_SDU_CONTINUE:
3132 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3133 break;
3134
3135 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3136
3137 chan->partial_sdu_len += skb->len;
3138 if (chan->partial_sdu_len > chan->sdu_len)
3139 kfree_skb(chan->sdu);
3140 else
3141 err = 0;
3142
3143 break;
3144
3145 case L2CAP_SDU_END:
3146 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3147 break;
3148
3149 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3150
3151 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3152 chan->partial_sdu_len += skb->len;
3153
3154 if (chan->partial_sdu_len > chan->imtu)
3155 goto drop;
3156
3157 if (chan->partial_sdu_len == chan->sdu_len) {
3158 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3159 err = sock_queue_rcv_skb(chan->sk, _skb);
3160 if (err < 0)
3161 kfree_skb(_skb);
3162 }
3163 err = 0;
3164
3165 drop:
3166 kfree_skb(chan->sdu);
3167 break;
3168 }
3169
3170 kfree_skb(skb);
3171 return err;
3172 }
3173
3174 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3175 {
3176 struct sk_buff *skb;
3177 u16 control;
3178
3179 while ((skb = skb_peek(&chan->srej_q))) {
3180 if (bt_cb(skb)->tx_seq != tx_seq)
3181 break;
3182
3183 skb = skb_dequeue(&chan->srej_q);
3184 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3185 l2cap_ertm_reassembly_sdu(chan, skb, control);
3186 chan->buffer_seq_srej =
3187 (chan->buffer_seq_srej + 1) % 64;
3188 tx_seq = (tx_seq + 1) % 64;
3189 }
3190 }
3191
/* Walk the pending-SREJ list: the entry for @tx_seq is removed (that
 * frame finally arrived); every entry scanned before it gets its
 * SELECT_REJECT S-frame re-sent and is rotated to the list tail so the
 * list keeps the retransmission order.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		/* Rotate the still-missing entry to the tail. */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3210
3211 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3212 {
3213 struct srej_list *new;
3214 u16 control;
3215
3216 while (tx_seq != chan->expected_tx_seq) {
3217 control = L2CAP_SUPER_SELECT_REJECT;
3218 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3219 l2cap_send_sframe(chan, control);
3220
3221 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3222 new->tx_seq = chan->expected_tx_seq;
3223 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3224 list_add_tail(&new->list, &chan->srej_l);
3225 }
3226 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3227 }
3228
3229 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3230 {
3231 u8 tx_seq = __get_txseq(rx_control);
3232 u8 req_seq = __get_reqseq(rx_control);
3233 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3234 int tx_seq_offset, expected_tx_seq_offset;
3235 int num_to_ack = (chan->tx_win/6) + 1;
3236 int err = 0;
3237
3238 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3239 tx_seq, rx_control);
3240
3241 if (L2CAP_CTRL_FINAL & rx_control &&
3242 chan->conn_state & L2CAP_CONN_WAIT_F) {
3243 del_timer(&chan->monitor_timer);
3244 if (chan->unacked_frames > 0)
3245 __mod_retrans_timer();
3246 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3247 }
3248
3249 chan->expected_ack_seq = req_seq;
3250 l2cap_drop_acked_frames(chan);
3251
3252 if (tx_seq == chan->expected_tx_seq)
3253 goto expected;
3254
3255 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3256 if (tx_seq_offset < 0)
3257 tx_seq_offset += 64;
3258
3259 /* invalid tx_seq */
3260 if (tx_seq_offset >= chan->tx_win) {
3261 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3262 goto drop;
3263 }
3264
3265 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3266 goto drop;
3267
3268 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3269 struct srej_list *first;
3270
3271 first = list_first_entry(&chan->srej_l,
3272 struct srej_list, list);
3273 if (tx_seq == first->tx_seq) {
3274 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3275 l2cap_check_srej_gap(chan, tx_seq);
3276
3277 list_del(&first->list);
3278 kfree(first);
3279
3280 if (list_empty(&chan->srej_l)) {
3281 chan->buffer_seq = chan->buffer_seq_srej;
3282 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3283 l2cap_send_ack(chan);
3284 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3285 }
3286 } else {
3287 struct srej_list *l;
3288
3289 /* duplicated tx_seq */
3290 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3291 goto drop;
3292
3293 list_for_each_entry(l, &chan->srej_l, list) {
3294 if (l->tx_seq == tx_seq) {
3295 l2cap_resend_srejframe(chan, tx_seq);
3296 return 0;
3297 }
3298 }
3299 l2cap_send_srejframe(chan, tx_seq);
3300 }
3301 } else {
3302 expected_tx_seq_offset =
3303 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3304 if (expected_tx_seq_offset < 0)
3305 expected_tx_seq_offset += 64;
3306
3307 /* duplicated tx_seq */
3308 if (tx_seq_offset < expected_tx_seq_offset)
3309 goto drop;
3310
3311 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3312
3313 BT_DBG("chan %p, Enter SREJ", chan);
3314
3315 INIT_LIST_HEAD(&chan->srej_l);
3316 chan->buffer_seq_srej = chan->buffer_seq;
3317
3318 __skb_queue_head_init(&chan->srej_q);
3319 __skb_queue_head_init(&chan->busy_q);
3320 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3321
3322 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3323
3324 l2cap_send_srejframe(chan, tx_seq);
3325
3326 del_timer(&chan->ack_timer);
3327 }
3328 return 0;
3329
3330 expected:
3331 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3332
3333 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3334 bt_cb(skb)->tx_seq = tx_seq;
3335 bt_cb(skb)->sar = sar;
3336 __skb_queue_tail(&chan->srej_q, skb);
3337 return 0;
3338 }
3339
3340 err = l2cap_push_rx_skb(chan, skb, rx_control);
3341 if (err < 0)
3342 return 0;
3343
3344 if (rx_control & L2CAP_CTRL_FINAL) {
3345 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3346 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3347 else
3348 l2cap_retransmit_frames(chan);
3349 }
3350
3351 __mod_ack_timer();
3352
3353 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3354 if (chan->num_acked == num_to_ack - 1)
3355 l2cap_send_ack(chan);
3356
3357 return 0;
3358
3359 drop:
3360 kfree_skb(skb);
3361 return 0;
3362 }
3363
/* Handle a Receiver Ready S-frame: process the acknowledgement and the
 * optional poll/final handshake, then resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	/* req_seq acknowledges everything sent before it. */
	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: the answer must carry the F-bit. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit answer to our poll: retransmit unless the REJ was
		 * already acted on meanwhile. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3405
/* Handle a Reject S-frame: the peer requests retransmission of all
 * I-frames starting at req_seq.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answer to our poll: skip retransmission if this
		 * REJ was already served while we waited. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember the REJ was served, in case the F-bit answer
		 * arrives later. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Select Reject S-frame: retransmit exactly the frame the peer
 * asks for, tracking the poll/final handshake so the same SREJ is not
 * served twice.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* An SREJ with P also acknowledges up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answer: retransmit only if this SREJ was not
		 * already served while waiting for the F-bit. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3464
/* Handle a Receiver Not Ready S-frame: the peer cannot accept data. */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing pending select-reject: stop the retransmission
		 * timer and, if polled, answer with the F-bit. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3490
3491 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3492 {
3493 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3494
3495 if (L2CAP_CTRL_FINAL & rx_control &&
3496 chan->conn_state & L2CAP_CONN_WAIT_F) {
3497 del_timer(&chan->monitor_timer);
3498 if (chan->unacked_frames > 0)
3499 __mod_retrans_timer();
3500 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3501 }
3502
3503 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3504 case L2CAP_SUPER_RCV_READY:
3505 l2cap_data_channel_rrframe(chan, rx_control);
3506 break;
3507
3508 case L2CAP_SUPER_REJECT:
3509 l2cap_data_channel_rejframe(chan, rx_control);
3510 break;
3511
3512 case L2CAP_SUPER_SELECT_REJECT:
3513 l2cap_data_channel_srejframe(chan, rx_control);
3514 break;
3515
3516 case L2CAP_SUPER_RCV_NOT_READY:
3517 l2cap_data_channel_rnrframe(chan, rx_control);
3518 break;
3519 }
3520
3521 kfree_skb(skb);
3522 return 0;
3523 }
3524
/* Validate and dispatch one ERTM frame received on @sk.
 *
 * Performs FCS verification, payload length sanity checks and req_seq
 * window validation, then routes the frame to the I-frame or S-frame
 * handler.  Always returns 0: corrupt frames are silently dropped,
 * protocol violations trigger a disconnect request.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the true payload length: subtract the 2-byte SDU
	 * length field of a START I-frame and the FCS if enabled. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* req_seq must lie within [expected_ack_seq, next_tx_seq],
	 * measured modulo the 64-entry sequence space. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must not carry any payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3594
/* Route an incoming data frame to the channel identified by @cid and
 * handle it according to the channel mode (Basic, ERTM or Streaming).
 * The skb is consumed in every case.  Always returns 0.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * channel's socket bh-locked; the bh_unlock_sock() in 'done' has no
 * visible matching lock here -- confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;
	pi = l2cap_pi(sk);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process now unless the user holds the socket; then
		 * defer to the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never carries S-frames and enforces the
		 * negotiated MPS. */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Lost frames are simply skipped: resynchronize the
		 * expected sequence number to the frame just seen. */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3684
3685 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3686 {
3687 struct sock *sk;
3688
3689 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3690 if (!sk)
3691 goto drop;
3692
3693 bh_lock_sock(sk);
3694
3695 BT_DBG("sk %p, len %d", sk, skb->len);
3696
3697 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3698 goto drop;
3699
3700 if (l2cap_pi(sk)->chan->imtu < skb->len)
3701 goto drop;
3702
3703 if (!sock_queue_rcv_skb(sk, skb))
3704 goto done;
3705
3706 drop:
3707 kfree_skb(skb);
3708
3709 done:
3710 if (sk)
3711 bh_unlock_sock(sk);
3712 return 0;
3713 }
3714
3715 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3716 {
3717 struct sock *sk;
3718
3719 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3720 if (!sk)
3721 goto drop;
3722
3723 bh_lock_sock(sk);
3724
3725 BT_DBG("sk %p, len %d", sk, skb->len);
3726
3727 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3728 goto drop;
3729
3730 if (l2cap_pi(sk)->chan->imtu < skb->len)
3731 goto drop;
3732
3733 if (!sock_queue_rcv_skb(sk, skb))
3734 goto done;
3735
3736 drop:
3737 kfree_skb(skb);
3738
3739 done:
3740 if (sk)
3741 bh_unlock_sock(sk);
3742 return 0;
3743 }
3744
3745 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3746 {
3747 struct l2cap_hdr *lh = (void *) skb->data;
3748 u16 cid, len;
3749 __le16 psm;
3750
3751 skb_pull(skb, L2CAP_HDR_SIZE);
3752 cid = __le16_to_cpu(lh->cid);
3753 len = __le16_to_cpu(lh->len);
3754
3755 if (len != skb->len) {
3756 kfree_skb(skb);
3757 return;
3758 }
3759
3760 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3761
3762 switch (cid) {
3763 case L2CAP_CID_LE_SIGNALING:
3764 case L2CAP_CID_SIGNALING:
3765 l2cap_sig_channel(conn, skb);
3766 break;
3767
3768 case L2CAP_CID_CONN_LESS:
3769 psm = get_unaligned_le16(skb->data);
3770 skb_pull(skb, 2);
3771 l2cap_conless_channel(conn, psm, skb);
3772 break;
3773
3774 case L2CAP_CID_LE_DATA:
3775 l2cap_att_channel(conn, cid, skb);
3776 break;
3777
3778 default:
3779 l2cap_data_channel(conn, cid, skb);
3780 break;
3781 }
3782 }
3783
3784 /* ---- L2CAP interface with lower layer (HCI) ---- */
3785
3786 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3787 {
3788 int exact = 0, lm1 = 0, lm2 = 0;
3789 register struct sock *sk;
3790 struct hlist_node *node;
3791
3792 if (type != ACL_LINK)
3793 return -EINVAL;
3794
3795 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3796
3797 /* Find listening sockets and check their link_mode */
3798 read_lock(&l2cap_sk_list.lock);
3799 sk_for_each(sk, node, &l2cap_sk_list.head) {
3800 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3801
3802 if (sk->sk_state != BT_LISTEN)
3803 continue;
3804
3805 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3806 lm1 |= HCI_LM_ACCEPT;
3807 if (chan->role_switch)
3808 lm1 |= HCI_LM_MASTER;
3809 exact++;
3810 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3811 lm2 |= HCI_LM_ACCEPT;
3812 if (chan->role_switch)
3813 lm2 |= HCI_LM_MASTER;
3814 }
3815 }
3816 read_unlock(&l2cap_sk_list.lock);
3817
3818 return exact ? lm1 : lm2;
3819 }
3820
3821 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3822 {
3823 struct l2cap_conn *conn;
3824
3825 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3826
3827 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3828 return -EINVAL;
3829
3830 if (!status) {
3831 conn = l2cap_conn_add(hcon, status);
3832 if (conn)
3833 l2cap_conn_ready(conn);
3834 } else
3835 l2cap_conn_del(hcon, bt_err(status));
3836
3837 return 0;
3838 }
3839
3840 static int l2cap_disconn_ind(struct hci_conn *hcon)
3841 {
3842 struct l2cap_conn *conn = hcon->l2cap_data;
3843
3844 BT_DBG("hcon %p", hcon);
3845
3846 if (hcon->type != ACL_LINK || !conn)
3847 return 0x13;
3848
3849 return conn->disc_reason;
3850 }
3851
3852 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3853 {
3854 BT_DBG("hcon %p reason %d", hcon, reason);
3855
3856 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3857 return -EINVAL;
3858
3859 l2cap_conn_del(hcon, bt_err(reason));
3860
3861 return 0;
3862 }
3863
3864 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3865 {
3866 struct sock *sk = chan->sk;
3867
3868 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3869 return;
3870
3871 if (encrypt == 0x00) {
3872 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3873 l2cap_sock_clear_timer(sk);
3874 l2cap_sock_set_timer(sk, HZ * 5);
3875 } else if (chan->sec_level == BT_SECURITY_HIGH)
3876 __l2cap_sock_close(sk, ECONNREFUSED);
3877 } else {
3878 if (chan->sec_level == BT_SECURITY_MEDIUM)
3879 l2cap_sock_clear_timer(sk);
3880 }
3881 }
3882
/*
 * HCI security procedure (authentication/encryption) finished for a
 * link.  Walk every channel on the connection and advance its state
 * machine according to @status and the new @encrypt state.
 * Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * leave it to the connect response handling. */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established or configuring channel: only the encryption
		 * change itself matters. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the connect request
				 * that was deferred until now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer to
				 * tear the channel down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* We are the acceptor and were waiting for security
			 * before answering the peer's connect request. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3955
/*
 * Entry point for ACL data from HCI.  Reassembles L2CAP frames that
 * arrive fragmented across several ACL packets, using conn->rx_skb /
 * conn->rx_len as the per-connection reassembly state, and hands each
 * complete frame to l2cap_recv_frame().  Consumes @skb; always
 * returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on a link may arrive before connect_cfm ran. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
								skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): l2cap_get_chan_by_scid() appears to return
		 * with the channel socket bh-locked on success (only
		 * bh_unlock_sock() calls are visible here) -- confirm. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames that could never fit the channel's
			 * incoming MTU before buffering them. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4066
4067 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4068 {
4069 struct sock *sk;
4070 struct hlist_node *node;
4071
4072 read_lock_bh(&l2cap_sk_list.lock);
4073
4074 sk_for_each(sk, node, &l2cap_sk_list.head) {
4075 struct l2cap_pinfo *pi = l2cap_pi(sk);
4076 struct l2cap_chan *chan = pi->chan;
4077
4078 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4079 batostr(&bt_sk(sk)->src),
4080 batostr(&bt_sk(sk)->dst),
4081 sk->sk_state, __le16_to_cpu(chan->psm),
4082 chan->scid, chan->dcid,
4083 chan->imtu, chan->omtu, chan->sec_level,
4084 chan->mode);
4085 }
4086
4087 read_unlock_bh(&l2cap_sk_list.lock);
4088
4089 return 0;
4090 }
4091
/* debugfs open: bind the seq_file show routine for a single record. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4096
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4103
/* debugfs entry created in l2cap_init(), removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4105
/* Callbacks registered with the HCI core -- the entry points from the
 * lower (HCI) layer into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4116
4117 int __init l2cap_init(void)
4118 {
4119 int err;
4120
4121 err = l2cap_init_sockets();
4122 if (err < 0)
4123 return err;
4124
4125 _busy_wq = create_singlethread_workqueue("l2cap");
4126 if (!_busy_wq) {
4127 err = -ENOMEM;
4128 goto error;
4129 }
4130
4131 err = hci_register_proto(&l2cap_hci_proto);
4132 if (err < 0) {
4133 BT_ERR("L2CAP protocol registration failed");
4134 bt_sock_unregister(BTPROTO_L2CAP);
4135 goto error;
4136 }
4137
4138 if (bt_debugfs) {
4139 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4140 bt_debugfs, NULL, &l2cap_debugfs_fops);
4141 if (!l2cap_debugfs)
4142 BT_ERR("Failed to create L2CAP debug file");
4143 }
4144
4145 return 0;
4146
4147 error:
4148 destroy_workqueue(_busy_wq);
4149 l2cap_cleanup_sockets();
4150 return err;
4151 }
4152
/*
 * Tear down the L2CAP layer: debugfs entry, workqueue, HCI protocol
 * registration and socket support -- roughly the reverse of
 * l2cap_init().
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain any work still queued before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4165
/* Module parameter: allow disabling ERTM support at load time. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.131556 seconds and 6 git commands to generate.