2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static struct workqueue_struct
*_busy_wq
;
65 struct bt_sock_list l2cap_sk_list
= {
66 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
69 static void l2cap_busy_work(struct work_struct
*work
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
73 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
75 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
90 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
94 list_for_each_entry(c
, &conn
->chan_l
, list
) {
101 /* Find channel with given SCID.
102 * Returns locked socket */
103 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
105 struct l2cap_chan
*c
;
107 read_lock(&conn
->chan_lock
);
108 c
= __l2cap_get_chan_by_scid(conn
, cid
);
111 read_unlock(&conn
->chan_lock
);
115 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
117 struct l2cap_chan
*c
;
119 list_for_each_entry(c
, &conn
->chan_l
, list
) {
120 if (c
->ident
== ident
)
126 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
128 struct l2cap_chan
*c
;
130 read_lock(&conn
->chan_lock
);
131 c
= __l2cap_get_chan_by_ident(conn
, ident
);
134 read_unlock(&conn
->chan_lock
);
138 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
140 u16 cid
= L2CAP_CID_DYN_START
;
142 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
143 if (!__l2cap_get_chan_by_scid(conn
, cid
))
150 struct l2cap_chan
*l2cap_chan_alloc(struct sock
*sk
)
152 struct l2cap_chan
*chan
;
154 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
/* Release a channel allocated by l2cap_chan_alloc().
 * kfree(NULL) is a no-op, so a NULL @chan is safe. */
void l2cap_chan_free(struct l2cap_chan *chan)
{
	kfree(chan);
}
168 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
170 struct sock
*sk
= chan
->sk
;
172 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
173 chan
->psm
, chan
->dcid
);
175 conn
->disc_reason
= 0x13;
179 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
180 if (conn
->hcon
->type
== LE_LINK
) {
182 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
183 chan
->scid
= L2CAP_CID_LE_DATA
;
184 chan
->dcid
= L2CAP_CID_LE_DATA
;
186 /* Alloc CID for connection-oriented socket */
187 chan
->scid
= l2cap_alloc_cid(conn
);
188 chan
->omtu
= L2CAP_DEFAULT_MTU
;
190 } else if (sk
->sk_type
== SOCK_DGRAM
) {
191 /* Connectionless socket */
192 chan
->scid
= L2CAP_CID_CONN_LESS
;
193 chan
->dcid
= L2CAP_CID_CONN_LESS
;
194 chan
->omtu
= L2CAP_DEFAULT_MTU
;
196 /* Raw socket can send/recv signalling messages only */
197 chan
->scid
= L2CAP_CID_SIGNALING
;
198 chan
->dcid
= L2CAP_CID_SIGNALING
;
199 chan
->omtu
= L2CAP_DEFAULT_MTU
;
204 list_add(&chan
->list
, &conn
->chan_l
);
208 * Must be called on the locked socket. */
209 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
211 struct sock
*sk
= chan
->sk
;
212 struct l2cap_conn
*conn
= chan
->conn
;
213 struct sock
*parent
= bt_sk(sk
)->parent
;
215 l2cap_sock_clear_timer(sk
);
217 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
220 /* Delete from channel list */
221 write_lock_bh(&conn
->chan_lock
);
222 list_del(&chan
->list
);
223 write_unlock_bh(&conn
->chan_lock
);
227 hci_conn_put(conn
->hcon
);
230 sk
->sk_state
= BT_CLOSED
;
231 sock_set_flag(sk
, SOCK_ZAPPED
);
237 bt_accept_unlink(sk
);
238 parent
->sk_data_ready(parent
, 0);
240 sk
->sk_state_change(sk
);
242 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
&&
243 chan
->conf_state
& L2CAP_CONF_INPUT_DONE
))
246 skb_queue_purge(&chan
->tx_q
);
248 if (chan
->mode
== L2CAP_MODE_ERTM
) {
249 struct srej_list
*l
, *tmp
;
251 del_timer(&chan
->retrans_timer
);
252 del_timer(&chan
->monitor_timer
);
253 del_timer(&chan
->ack_timer
);
255 skb_queue_purge(&chan
->srej_q
);
256 skb_queue_purge(&chan
->busy_q
);
258 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
265 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
267 struct sock
*sk
= chan
->sk
;
269 if (sk
->sk_type
== SOCK_RAW
) {
270 switch (chan
->sec_level
) {
271 case BT_SECURITY_HIGH
:
272 return HCI_AT_DEDICATED_BONDING_MITM
;
273 case BT_SECURITY_MEDIUM
:
274 return HCI_AT_DEDICATED_BONDING
;
276 return HCI_AT_NO_BONDING
;
278 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
279 if (chan
->sec_level
== BT_SECURITY_LOW
)
280 chan
->sec_level
= BT_SECURITY_SDP
;
282 if (chan
->sec_level
== BT_SECURITY_HIGH
)
283 return HCI_AT_NO_BONDING_MITM
;
285 return HCI_AT_NO_BONDING
;
287 switch (chan
->sec_level
) {
288 case BT_SECURITY_HIGH
:
289 return HCI_AT_GENERAL_BONDING_MITM
;
290 case BT_SECURITY_MEDIUM
:
291 return HCI_AT_GENERAL_BONDING
;
293 return HCI_AT_NO_BONDING
;
298 /* Service level security */
299 static inline int l2cap_check_security(struct l2cap_chan
*chan
)
301 struct l2cap_conn
*conn
= chan
->conn
;
304 auth_type
= l2cap_get_auth_type(chan
);
306 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
309 u8
l2cap_get_ident(struct l2cap_conn
*conn
)
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn
->lock
);
321 if (++conn
->tx_ident
> 128)
326 spin_unlock_bh(&conn
->lock
);
331 void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
333 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
336 BT_DBG("code 0x%2.2x", code
);
341 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
342 flags
= ACL_START_NO_FLUSH
;
346 hci_send_acl(conn
->hcon
, skb
, flags
);
349 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
352 struct l2cap_hdr
*lh
;
353 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
354 struct l2cap_conn
*conn
= chan
->conn
;
355 struct sock
*sk
= (struct sock
*)pi
;
356 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
359 if (sk
->sk_state
!= BT_CONNECTED
)
362 if (chan
->fcs
== L2CAP_FCS_CRC16
)
365 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
367 count
= min_t(unsigned int, conn
->mtu
, hlen
);
368 control
|= L2CAP_CTRL_FRAME_TYPE
;
370 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
371 control
|= L2CAP_CTRL_FINAL
;
372 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
375 if (chan
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
376 control
|= L2CAP_CTRL_POLL
;
377 chan
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
380 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
384 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
385 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
386 lh
->cid
= cpu_to_le16(chan
->dcid
);
387 put_unaligned_le16(control
, skb_put(skb
, 2));
389 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
390 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
391 put_unaligned_le16(fcs
, skb_put(skb
, 2));
394 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
395 flags
= ACL_START_NO_FLUSH
;
399 hci_send_acl(chan
->conn
->hcon
, skb
, flags
);
402 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
404 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
405 control
|= L2CAP_SUPER_RCV_NOT_READY
;
406 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
408 control
|= L2CAP_SUPER_RCV_READY
;
410 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
412 l2cap_send_sframe(chan
, control
);
415 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
417 return !(chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
);
420 static void l2cap_do_start(struct l2cap_chan
*chan
)
422 struct l2cap_conn
*conn
= chan
->conn
;
424 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
425 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
428 if (l2cap_check_security(chan
) &&
429 __l2cap_no_conn_pending(chan
)) {
430 struct l2cap_conn_req req
;
431 req
.scid
= cpu_to_le16(chan
->scid
);
434 chan
->ident
= l2cap_get_ident(conn
);
435 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
437 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
441 struct l2cap_info_req req
;
442 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
444 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
445 conn
->info_ident
= l2cap_get_ident(conn
);
447 mod_timer(&conn
->info_timer
, jiffies
+
448 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
450 l2cap_send_cmd(conn
, conn
->info_ident
,
451 L2CAP_INFO_REQ
, sizeof(req
), &req
);
455 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
457 u32 local_feat_mask
= l2cap_feat_mask
;
459 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
462 case L2CAP_MODE_ERTM
:
463 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
464 case L2CAP_MODE_STREAMING
:
465 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
471 void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
474 struct l2cap_disconn_req req
;
481 if (chan
->mode
== L2CAP_MODE_ERTM
) {
482 del_timer(&chan
->retrans_timer
);
483 del_timer(&chan
->monitor_timer
);
484 del_timer(&chan
->ack_timer
);
487 req
.dcid
= cpu_to_le16(chan
->dcid
);
488 req
.scid
= cpu_to_le16(chan
->scid
);
489 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
490 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
492 sk
->sk_state
= BT_DISCONN
;
496 /* ---- L2CAP connections ---- */
497 static void l2cap_conn_start(struct l2cap_conn
*conn
)
499 struct l2cap_chan
*chan
, *tmp
;
501 BT_DBG("conn %p", conn
);
503 read_lock(&conn
->chan_lock
);
505 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
506 struct sock
*sk
= chan
->sk
;
510 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
511 sk
->sk_type
!= SOCK_STREAM
) {
516 if (sk
->sk_state
== BT_CONNECT
) {
517 struct l2cap_conn_req req
;
519 if (!l2cap_check_security(chan
) ||
520 !__l2cap_no_conn_pending(chan
)) {
525 if (!l2cap_mode_supported(chan
->mode
,
527 && chan
->conf_state
&
528 L2CAP_CONF_STATE2_DEVICE
) {
529 /* __l2cap_sock_close() calls list_del(chan)
530 * so release the lock */
531 read_unlock_bh(&conn
->chan_lock
);
532 __l2cap_sock_close(sk
, ECONNRESET
);
533 read_lock_bh(&conn
->chan_lock
);
538 req
.scid
= cpu_to_le16(chan
->scid
);
541 chan
->ident
= l2cap_get_ident(conn
);
542 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
544 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
547 } else if (sk
->sk_state
== BT_CONNECT2
) {
548 struct l2cap_conn_rsp rsp
;
550 rsp
.scid
= cpu_to_le16(chan
->dcid
);
551 rsp
.dcid
= cpu_to_le16(chan
->scid
);
553 if (l2cap_check_security(chan
)) {
554 if (bt_sk(sk
)->defer_setup
) {
555 struct sock
*parent
= bt_sk(sk
)->parent
;
556 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
557 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
558 parent
->sk_data_ready(parent
, 0);
561 sk
->sk_state
= BT_CONFIG
;
562 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
563 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
566 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
567 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
570 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
573 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
||
574 rsp
.result
!= L2CAP_CR_SUCCESS
) {
579 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
580 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
581 l2cap_build_conf_req(chan
, buf
), buf
);
582 chan
->num_conf_req
++;
588 read_unlock(&conn
->chan_lock
);
591 /* Find socket with cid and source bdaddr.
592 * Returns closest match, locked.
594 static struct sock
*l2cap_get_sock_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
596 struct sock
*sk
= NULL
, *sk1
= NULL
;
597 struct hlist_node
*node
;
599 read_lock(&l2cap_sk_list
.lock
);
601 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
602 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
604 if (state
&& sk
->sk_state
!= state
)
607 if (chan
->scid
== cid
) {
609 if (!bacmp(&bt_sk(sk
)->src
, src
))
613 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
618 read_unlock(&l2cap_sk_list
.lock
);
620 return node
? sk
: sk1
;
623 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
625 struct sock
*parent
, *sk
;
626 struct l2cap_chan
*chan
;
630 /* Check if we have socket listening on cid */
631 parent
= l2cap_get_sock_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
636 bh_lock_sock(parent
);
638 /* Check for backlog size */
639 if (sk_acceptq_is_full(parent
)) {
640 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
644 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
648 chan
= l2cap_chan_alloc(sk
);
654 l2cap_pi(sk
)->chan
= chan
;
656 write_lock_bh(&conn
->chan_lock
);
658 hci_conn_hold(conn
->hcon
);
660 l2cap_sock_init(sk
, parent
);
662 bacpy(&bt_sk(sk
)->src
, conn
->src
);
663 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
665 bt_accept_enqueue(parent
, sk
);
667 __l2cap_chan_add(conn
, chan
);
669 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
671 sk
->sk_state
= BT_CONNECTED
;
672 parent
->sk_data_ready(parent
, 0);
674 write_unlock_bh(&conn
->chan_lock
);
677 bh_unlock_sock(parent
);
680 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
682 struct l2cap_chan
*chan
;
684 BT_DBG("conn %p", conn
);
686 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
687 l2cap_le_conn_ready(conn
);
689 read_lock(&conn
->chan_lock
);
691 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
692 struct sock
*sk
= chan
->sk
;
696 if (conn
->hcon
->type
== LE_LINK
) {
697 l2cap_sock_clear_timer(sk
);
698 sk
->sk_state
= BT_CONNECTED
;
699 sk
->sk_state_change(sk
);
702 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
703 sk
->sk_type
!= SOCK_STREAM
) {
704 l2cap_sock_clear_timer(sk
);
705 sk
->sk_state
= BT_CONNECTED
;
706 sk
->sk_state_change(sk
);
707 } else if (sk
->sk_state
== BT_CONNECT
)
708 l2cap_do_start(chan
);
713 read_unlock(&conn
->chan_lock
);
716 /* Notify sockets that we cannot guaranty reliability anymore */
717 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
719 struct l2cap_chan
*chan
;
721 BT_DBG("conn %p", conn
);
723 read_lock(&conn
->chan_lock
);
725 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
726 struct sock
*sk
= chan
->sk
;
728 if (chan
->force_reliable
)
732 read_unlock(&conn
->chan_lock
);
735 static void l2cap_info_timeout(unsigned long arg
)
737 struct l2cap_conn
*conn
= (void *) arg
;
739 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
740 conn
->info_ident
= 0;
742 l2cap_conn_start(conn
);
745 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
747 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
752 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
756 hcon
->l2cap_data
= conn
;
759 BT_DBG("hcon %p conn %p", hcon
, conn
);
761 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
762 conn
->mtu
= hcon
->hdev
->le_mtu
;
764 conn
->mtu
= hcon
->hdev
->acl_mtu
;
766 conn
->src
= &hcon
->hdev
->bdaddr
;
767 conn
->dst
= &hcon
->dst
;
771 spin_lock_init(&conn
->lock
);
772 rwlock_init(&conn
->chan_lock
);
774 INIT_LIST_HEAD(&conn
->chan_l
);
776 if (hcon
->type
!= LE_LINK
)
777 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
778 (unsigned long) conn
);
780 conn
->disc_reason
= 0x13;
785 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
787 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
788 struct l2cap_chan
*chan
, *l
;
794 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
796 kfree_skb(conn
->rx_skb
);
799 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
802 l2cap_chan_del(chan
, err
);
807 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
808 del_timer_sync(&conn
->info_timer
);
810 hcon
->l2cap_data
= NULL
;
814 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
816 write_lock_bh(&conn
->chan_lock
);
817 __l2cap_chan_add(conn
, chan
);
818 write_unlock_bh(&conn
->chan_lock
);
821 /* ---- Socket interface ---- */
823 /* Find socket with psm and source bdaddr.
824 * Returns closest match.
826 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
828 struct sock
*sk
= NULL
, *sk1
= NULL
;
829 struct hlist_node
*node
;
831 read_lock(&l2cap_sk_list
.lock
);
833 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
834 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
836 if (state
&& sk
->sk_state
!= state
)
839 if (chan
->psm
== psm
) {
841 if (!bacmp(&bt_sk(sk
)->src
, src
))
845 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
850 read_unlock(&l2cap_sk_list
.lock
);
852 return node
? sk
: sk1
;
855 int l2cap_chan_connect(struct l2cap_chan
*chan
)
857 struct sock
*sk
= chan
->sk
;
858 bdaddr_t
*src
= &bt_sk(sk
)->src
;
859 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
860 struct l2cap_conn
*conn
;
861 struct hci_conn
*hcon
;
862 struct hci_dev
*hdev
;
866 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
869 hdev
= hci_get_route(dst
, src
);
871 return -EHOSTUNREACH
;
873 hci_dev_lock_bh(hdev
);
875 auth_type
= l2cap_get_auth_type(chan
);
877 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
878 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
879 chan
->sec_level
, auth_type
);
881 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
882 chan
->sec_level
, auth_type
);
889 conn
= l2cap_conn_add(hcon
, 0);
896 /* Update source addr of the socket */
897 bacpy(src
, conn
->src
);
899 l2cap_chan_add(conn
, chan
);
901 sk
->sk_state
= BT_CONNECT
;
902 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
904 if (hcon
->state
== BT_CONNECTED
) {
905 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
906 sk
->sk_type
!= SOCK_STREAM
) {
907 l2cap_sock_clear_timer(sk
);
908 if (l2cap_check_security(chan
))
909 sk
->sk_state
= BT_CONNECTED
;
911 l2cap_do_start(chan
);
917 hci_dev_unlock_bh(hdev
);
922 int __l2cap_wait_ack(struct sock
*sk
)
924 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
925 DECLARE_WAITQUEUE(wait
, current
);
929 add_wait_queue(sk_sleep(sk
), &wait
);
930 while ((chan
->unacked_frames
> 0 && chan
->conn
)) {
931 set_current_state(TASK_INTERRUPTIBLE
);
936 if (signal_pending(current
)) {
937 err
= sock_intr_errno(timeo
);
942 timeo
= schedule_timeout(timeo
);
945 err
= sock_error(sk
);
949 set_current_state(TASK_RUNNING
);
950 remove_wait_queue(sk_sleep(sk
), &wait
);
954 static void l2cap_monitor_timeout(unsigned long arg
)
956 struct l2cap_chan
*chan
= (void *) arg
;
957 struct sock
*sk
= chan
->sk
;
959 BT_DBG("chan %p", chan
);
962 if (chan
->retry_count
>= chan
->remote_max_tx
) {
963 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
969 __mod_monitor_timer();
971 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
975 static void l2cap_retrans_timeout(unsigned long arg
)
977 struct l2cap_chan
*chan
= (void *) arg
;
978 struct sock
*sk
= chan
->sk
;
980 BT_DBG("chan %p", chan
);
983 chan
->retry_count
= 1;
984 __mod_monitor_timer();
986 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
988 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
992 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
996 while ((skb
= skb_peek(&chan
->tx_q
)) &&
997 chan
->unacked_frames
) {
998 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1001 skb
= skb_dequeue(&chan
->tx_q
);
1004 chan
->unacked_frames
--;
1007 if (!chan
->unacked_frames
)
1008 del_timer(&chan
->retrans_timer
);
1011 void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
1013 struct hci_conn
*hcon
= chan
->conn
->hcon
;
1016 BT_DBG("chan %p, skb %p len %d", chan
, skb
, skb
->len
);
1018 if (!chan
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1019 flags
= ACL_START_NO_FLUSH
;
1023 hci_send_acl(hcon
, skb
, flags
);
1026 void l2cap_streaming_send(struct l2cap_chan
*chan
)
1028 struct sk_buff
*skb
;
1031 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1032 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1033 control
|= chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1034 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1036 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1037 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1038 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1041 l2cap_do_send(chan
, skb
);
1043 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1047 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u8 tx_seq
)
1049 struct sk_buff
*skb
, *tx_skb
;
1052 skb
= skb_peek(&chan
->tx_q
);
1057 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1060 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1063 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1065 if (chan
->remote_max_tx
&&
1066 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1067 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1071 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1072 bt_cb(skb
)->retries
++;
1073 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1074 control
&= L2CAP_CTRL_SAR
;
1076 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1077 control
|= L2CAP_CTRL_FINAL
;
1078 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1081 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1082 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1084 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1086 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1087 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1088 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1091 l2cap_do_send(chan
, tx_skb
);
1094 int l2cap_ertm_send(struct l2cap_chan
*chan
)
1096 struct sk_buff
*skb
, *tx_skb
;
1097 struct sock
*sk
= chan
->sk
;
1101 if (sk
->sk_state
!= BT_CONNECTED
)
1104 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1106 if (chan
->remote_max_tx
&&
1107 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1108 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1112 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1114 bt_cb(skb
)->retries
++;
1116 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1117 control
&= L2CAP_CTRL_SAR
;
1119 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1120 control
|= L2CAP_CTRL_FINAL
;
1121 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1123 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1124 | (chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1125 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1128 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1129 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1130 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1133 l2cap_do_send(chan
, tx_skb
);
1135 __mod_retrans_timer();
1137 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1138 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1140 if (bt_cb(skb
)->retries
== 1)
1141 chan
->unacked_frames
++;
1143 chan
->frames_sent
++;
1145 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1146 chan
->tx_send_head
= NULL
;
1148 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1156 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1160 if (!skb_queue_empty(&chan
->tx_q
))
1161 chan
->tx_send_head
= chan
->tx_q
.next
;
1163 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1164 ret
= l2cap_ertm_send(chan
);
1168 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1172 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1174 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1175 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1176 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1177 l2cap_send_sframe(chan
, control
);
1181 if (l2cap_ertm_send(chan
) > 0)
1184 control
|= L2CAP_SUPER_RCV_READY
;
1185 l2cap_send_sframe(chan
, control
);
1188 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1190 struct srej_list
*tail
;
1193 control
= L2CAP_SUPER_SELECT_REJECT
;
1194 control
|= L2CAP_CTRL_FINAL
;
1196 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1197 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1199 l2cap_send_sframe(chan
, control
);
1202 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1204 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1205 struct sk_buff
**frag
;
1208 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1214 /* Continuation fragments (no L2CAP header) */
1215 frag
= &skb_shinfo(skb
)->frag_list
;
1217 count
= min_t(unsigned int, conn
->mtu
, len
);
1219 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1222 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1228 frag
= &(*frag
)->next
;
1234 struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1236 struct sock
*sk
= chan
->sk
;
1237 struct l2cap_conn
*conn
= chan
->conn
;
1238 struct sk_buff
*skb
;
1239 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1240 struct l2cap_hdr
*lh
;
1242 BT_DBG("sk %p len %d", sk
, (int)len
);
1244 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1245 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1246 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1248 return ERR_PTR(err
);
1250 /* Create L2CAP header */
1251 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1252 lh
->cid
= cpu_to_le16(chan
->dcid
);
1253 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1254 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1256 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1257 if (unlikely(err
< 0)) {
1259 return ERR_PTR(err
);
1264 struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1266 struct sock
*sk
= chan
->sk
;
1267 struct l2cap_conn
*conn
= chan
->conn
;
1268 struct sk_buff
*skb
;
1269 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1270 struct l2cap_hdr
*lh
;
1272 BT_DBG("sk %p len %d", sk
, (int)len
);
1274 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1275 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1276 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1278 return ERR_PTR(err
);
1280 /* Create L2CAP header */
1281 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1282 lh
->cid
= cpu_to_le16(chan
->dcid
);
1283 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1285 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1286 if (unlikely(err
< 0)) {
1288 return ERR_PTR(err
);
1293 struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1295 struct sock
*sk
= chan
->sk
;
1296 struct l2cap_conn
*conn
= chan
->conn
;
1297 struct sk_buff
*skb
;
1298 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1299 struct l2cap_hdr
*lh
;
1301 BT_DBG("sk %p len %d", sk
, (int)len
);
1304 return ERR_PTR(-ENOTCONN
);
1309 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1312 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1313 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1314 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1316 return ERR_PTR(err
);
1318 /* Create L2CAP header */
1319 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1320 lh
->cid
= cpu_to_le16(chan
->dcid
);
1321 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1322 put_unaligned_le16(control
, skb_put(skb
, 2));
1324 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1326 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1327 if (unlikely(err
< 0)) {
1329 return ERR_PTR(err
);
1332 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1333 put_unaligned_le16(0, skb_put(skb
, 2));
1335 bt_cb(skb
)->retries
= 0;
1339 int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1341 struct sk_buff
*skb
;
1342 struct sk_buff_head sar_queue
;
1346 skb_queue_head_init(&sar_queue
);
1347 control
= L2CAP_SDU_START
;
1348 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1350 return PTR_ERR(skb
);
1352 __skb_queue_tail(&sar_queue
, skb
);
1353 len
-= chan
->remote_mps
;
1354 size
+= chan
->remote_mps
;
1359 if (len
> chan
->remote_mps
) {
1360 control
= L2CAP_SDU_CONTINUE
;
1361 buflen
= chan
->remote_mps
;
1363 control
= L2CAP_SDU_END
;
1367 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1369 skb_queue_purge(&sar_queue
);
1370 return PTR_ERR(skb
);
1373 __skb_queue_tail(&sar_queue
, skb
);
1377 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1378 if (chan
->tx_send_head
== NULL
)
1379 chan
->tx_send_head
= sar_queue
.next
;
1384 static void l2cap_chan_ready(struct sock
*sk
)
1386 struct sock
*parent
= bt_sk(sk
)->parent
;
1387 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1389 BT_DBG("sk %p, parent %p", sk
, parent
);
1391 chan
->conf_state
= 0;
1392 l2cap_sock_clear_timer(sk
);
1395 /* Outgoing channel.
1396 * Wake up socket sleeping on connect.
1398 sk
->sk_state
= BT_CONNECTED
;
1399 sk
->sk_state_change(sk
);
1401 /* Incoming channel.
1402 * Wake up socket sleeping on accept.
1404 parent
->sk_data_ready(parent
, 0);
1408 /* Copy frame to all raw sockets on that connection */
1409 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1411 struct sk_buff
*nskb
;
1412 struct l2cap_chan
*chan
;
1414 BT_DBG("conn %p", conn
);
1416 read_lock(&conn
->chan_lock
);
1417 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1418 struct sock
*sk
= chan
->sk
;
1419 if (sk
->sk_type
!= SOCK_RAW
)
1422 /* Don't send frame to the socket it came from */
1425 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1429 if (sock_queue_rcv_skb(sk
, nskb
))
1432 read_unlock(&conn
->chan_lock
);
1435 /* ---- L2CAP signalling commands ---- */
1436 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1437 u8 code
, u8 ident
, u16 dlen
, void *data
)
1439 struct sk_buff
*skb
, **frag
;
1440 struct l2cap_cmd_hdr
*cmd
;
1441 struct l2cap_hdr
*lh
;
1444 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1445 conn
, code
, ident
, dlen
);
1447 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1448 count
= min_t(unsigned int, conn
->mtu
, len
);
1450 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1454 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1455 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1457 if (conn
->hcon
->type
== LE_LINK
)
1458 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1460 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1462 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1465 cmd
->len
= cpu_to_le16(dlen
);
1468 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1469 memcpy(skb_put(skb
, count
), data
, count
);
1475 /* Continuation fragments (no L2CAP header) */
1476 frag
= &skb_shinfo(skb
)->frag_list
;
1478 count
= min_t(unsigned int, conn
->mtu
, len
);
1480 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1484 memcpy(skb_put(*frag
, count
), data
, count
);
1489 frag
= &(*frag
)->next
;
1499 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1501 struct l2cap_conf_opt
*opt
= *ptr
;
1504 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1512 *val
= *((u8
*) opt
->val
);
1516 *val
= get_unaligned_le16(opt
->val
);
1520 *val
= get_unaligned_le32(opt
->val
);
1524 *val
= (unsigned long) opt
->val
;
1528 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1532 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1534 struct l2cap_conf_opt
*opt
= *ptr
;
1536 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1543 *((u8
*) opt
->val
) = val
;
1547 put_unaligned_le16(val
, opt
->val
);
1551 put_unaligned_le32(val
, opt
->val
);
1555 memcpy(opt
->val
, (void *) val
, len
);
1559 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1562 static void l2cap_ack_timeout(unsigned long arg
)
1564 struct l2cap_chan
*chan
= (void *) arg
;
1566 bh_lock_sock(chan
->sk
);
1567 l2cap_send_ack(chan
);
1568 bh_unlock_sock(chan
->sk
);
1571 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1573 struct sock
*sk
= chan
->sk
;
1575 chan
->expected_ack_seq
= 0;
1576 chan
->unacked_frames
= 0;
1577 chan
->buffer_seq
= 0;
1578 chan
->num_acked
= 0;
1579 chan
->frames_sent
= 0;
1581 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1582 (unsigned long) chan
);
1583 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1584 (unsigned long) chan
);
1585 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1587 skb_queue_head_init(&chan
->srej_q
);
1588 skb_queue_head_init(&chan
->busy_q
);
1590 INIT_LIST_HEAD(&chan
->srej_l
);
1592 INIT_WORK(&chan
->busy_work
, l2cap_busy_work
);
1594 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1597 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1600 case L2CAP_MODE_STREAMING
:
1601 case L2CAP_MODE_ERTM
:
1602 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1606 return L2CAP_MODE_BASIC
;
1610 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1612 struct l2cap_conf_req
*req
= data
;
1613 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
1614 void *ptr
= req
->data
;
1616 BT_DBG("chan %p", chan
);
1618 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1621 switch (chan
->mode
) {
1622 case L2CAP_MODE_STREAMING
:
1623 case L2CAP_MODE_ERTM
:
1624 if (chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1629 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
1634 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
1635 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
1637 switch (chan
->mode
) {
1638 case L2CAP_MODE_BASIC
:
1639 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1640 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1643 rfc
.mode
= L2CAP_MODE_BASIC
;
1645 rfc
.max_transmit
= 0;
1646 rfc
.retrans_timeout
= 0;
1647 rfc
.monitor_timeout
= 0;
1648 rfc
.max_pdu_size
= 0;
1650 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1651 (unsigned long) &rfc
);
1654 case L2CAP_MODE_ERTM
:
1655 rfc
.mode
= L2CAP_MODE_ERTM
;
1656 rfc
.txwin_size
= chan
->tx_win
;
1657 rfc
.max_transmit
= chan
->max_tx
;
1658 rfc
.retrans_timeout
= 0;
1659 rfc
.monitor_timeout
= 0;
1660 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1661 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1662 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1664 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1665 (unsigned long) &rfc
);
1667 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1670 if (chan
->fcs
== L2CAP_FCS_NONE
||
1671 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1672 chan
->fcs
= L2CAP_FCS_NONE
;
1673 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1677 case L2CAP_MODE_STREAMING
:
1678 rfc
.mode
= L2CAP_MODE_STREAMING
;
1680 rfc
.max_transmit
= 0;
1681 rfc
.retrans_timeout
= 0;
1682 rfc
.monitor_timeout
= 0;
1683 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1684 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1685 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1687 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1688 (unsigned long) &rfc
);
1690 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1693 if (chan
->fcs
== L2CAP_FCS_NONE
||
1694 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1695 chan
->fcs
= L2CAP_FCS_NONE
;
1696 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1701 req
->dcid
= cpu_to_le16(chan
->dcid
);
1702 req
->flags
= cpu_to_le16(0);
1707 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
1709 struct l2cap_conf_rsp
*rsp
= data
;
1710 void *ptr
= rsp
->data
;
1711 void *req
= chan
->conf_req
;
1712 int len
= chan
->conf_len
;
1713 int type
, hint
, olen
;
1715 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1716 u16 mtu
= L2CAP_DEFAULT_MTU
;
1717 u16 result
= L2CAP_CONF_SUCCESS
;
1719 BT_DBG("chan %p", chan
);
1721 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1722 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1724 hint
= type
& L2CAP_CONF_HINT
;
1725 type
&= L2CAP_CONF_MASK
;
1728 case L2CAP_CONF_MTU
:
1732 case L2CAP_CONF_FLUSH_TO
:
1733 chan
->flush_to
= val
;
1736 case L2CAP_CONF_QOS
:
1739 case L2CAP_CONF_RFC
:
1740 if (olen
== sizeof(rfc
))
1741 memcpy(&rfc
, (void *) val
, olen
);
1744 case L2CAP_CONF_FCS
:
1745 if (val
== L2CAP_FCS_NONE
)
1746 chan
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
1754 result
= L2CAP_CONF_UNKNOWN
;
1755 *((u8
*) ptr
++) = type
;
1760 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
1763 switch (chan
->mode
) {
1764 case L2CAP_MODE_STREAMING
:
1765 case L2CAP_MODE_ERTM
:
1766 if (!(chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
1767 chan
->mode
= l2cap_select_mode(rfc
.mode
,
1768 chan
->conn
->feat_mask
);
1772 if (chan
->mode
!= rfc
.mode
)
1773 return -ECONNREFUSED
;
1779 if (chan
->mode
!= rfc
.mode
) {
1780 result
= L2CAP_CONF_UNACCEPT
;
1781 rfc
.mode
= chan
->mode
;
1783 if (chan
->num_conf_rsp
== 1)
1784 return -ECONNREFUSED
;
1786 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1787 sizeof(rfc
), (unsigned long) &rfc
);
1791 if (result
== L2CAP_CONF_SUCCESS
) {
1792 /* Configure output options and let the other side know
1793 * which ones we don't like. */
1795 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
1796 result
= L2CAP_CONF_UNACCEPT
;
1799 chan
->conf_state
|= L2CAP_CONF_MTU_DONE
;
1801 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
1804 case L2CAP_MODE_BASIC
:
1805 chan
->fcs
= L2CAP_FCS_NONE
;
1806 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1809 case L2CAP_MODE_ERTM
:
1810 chan
->remote_tx_win
= rfc
.txwin_size
;
1811 chan
->remote_max_tx
= rfc
.max_transmit
;
1813 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
1814 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1816 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1818 rfc
.retrans_timeout
=
1819 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
1820 rfc
.monitor_timeout
=
1821 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
1823 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1825 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1826 sizeof(rfc
), (unsigned long) &rfc
);
1830 case L2CAP_MODE_STREAMING
:
1831 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
1832 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1834 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1836 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1838 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1839 sizeof(rfc
), (unsigned long) &rfc
);
1844 result
= L2CAP_CONF_UNACCEPT
;
1846 memset(&rfc
, 0, sizeof(rfc
));
1847 rfc
.mode
= chan
->mode
;
1850 if (result
== L2CAP_CONF_SUCCESS
)
1851 chan
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
1853 rsp
->scid
= cpu_to_le16(chan
->dcid
);
1854 rsp
->result
= cpu_to_le16(result
);
1855 rsp
->flags
= cpu_to_le16(0x0000);
1860 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
1862 struct l2cap_conf_req
*req
= data
;
1863 void *ptr
= req
->data
;
1866 struct l2cap_conf_rfc rfc
;
1868 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
1870 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1871 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1874 case L2CAP_CONF_MTU
:
1875 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
1876 *result
= L2CAP_CONF_UNACCEPT
;
1877 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
1880 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
1883 case L2CAP_CONF_FLUSH_TO
:
1884 chan
->flush_to
= val
;
1885 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
1889 case L2CAP_CONF_RFC
:
1890 if (olen
== sizeof(rfc
))
1891 memcpy(&rfc
, (void *)val
, olen
);
1893 if ((chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
1894 rfc
.mode
!= chan
->mode
)
1895 return -ECONNREFUSED
;
1899 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1900 sizeof(rfc
), (unsigned long) &rfc
);
1905 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
1906 return -ECONNREFUSED
;
1908 chan
->mode
= rfc
.mode
;
1910 if (*result
== L2CAP_CONF_SUCCESS
) {
1912 case L2CAP_MODE_ERTM
:
1913 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1914 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1915 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1917 case L2CAP_MODE_STREAMING
:
1918 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1922 req
->dcid
= cpu_to_le16(chan
->dcid
);
1923 req
->flags
= cpu_to_le16(0x0000);
1928 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
1930 struct l2cap_conf_rsp
*rsp
= data
;
1931 void *ptr
= rsp
->data
;
1933 BT_DBG("chan %p", chan
);
1935 rsp
->scid
= cpu_to_le16(chan
->dcid
);
1936 rsp
->result
= cpu_to_le16(result
);
1937 rsp
->flags
= cpu_to_le16(flags
);
1942 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
1944 struct l2cap_conn_rsp rsp
;
1945 struct l2cap_conn
*conn
= chan
->conn
;
1948 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1949 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1950 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1951 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1952 l2cap_send_cmd(conn
, chan
->ident
,
1953 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1955 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
1958 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
1959 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1960 l2cap_build_conf_req(chan
, buf
), buf
);
1961 chan
->num_conf_req
++;
1964 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
1968 struct l2cap_conf_rfc rfc
;
1970 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
1972 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
1975 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1976 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1979 case L2CAP_CONF_RFC
:
1980 if (olen
== sizeof(rfc
))
1981 memcpy(&rfc
, (void *)val
, olen
);
1988 case L2CAP_MODE_ERTM
:
1989 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1990 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1991 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1993 case L2CAP_MODE_STREAMING
:
1994 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1998 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2000 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2002 if (rej
->reason
!= 0x0000)
2005 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2006 cmd
->ident
== conn
->info_ident
) {
2007 del_timer(&conn
->info_timer
);
2009 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2010 conn
->info_ident
= 0;
2012 l2cap_conn_start(conn
);
2018 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2020 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2021 struct l2cap_conn_rsp rsp
;
2022 struct l2cap_chan
*chan
= NULL
;
2023 struct sock
*parent
, *sk
= NULL
;
2024 int result
, status
= L2CAP_CS_NO_INFO
;
2026 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2027 __le16 psm
= req
->psm
;
2029 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2031 /* Check if we have socket listening on psm */
2032 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2034 result
= L2CAP_CR_BAD_PSM
;
2038 bh_lock_sock(parent
);
2040 /* Check if the ACL is secure enough (if not SDP) */
2041 if (psm
!= cpu_to_le16(0x0001) &&
2042 !hci_conn_check_link_mode(conn
->hcon
)) {
2043 conn
->disc_reason
= 0x05;
2044 result
= L2CAP_CR_SEC_BLOCK
;
2048 result
= L2CAP_CR_NO_MEM
;
2050 /* Check for backlog size */
2051 if (sk_acceptq_is_full(parent
)) {
2052 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2056 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2060 chan
= l2cap_chan_alloc(sk
);
2062 l2cap_sock_kill(sk
);
2066 l2cap_pi(sk
)->chan
= chan
;
2068 write_lock_bh(&conn
->chan_lock
);
2070 /* Check if we already have channel with that dcid */
2071 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2072 write_unlock_bh(&conn
->chan_lock
);
2073 sock_set_flag(sk
, SOCK_ZAPPED
);
2074 l2cap_sock_kill(sk
);
2078 hci_conn_hold(conn
->hcon
);
2080 l2cap_sock_init(sk
, parent
);
2081 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2082 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2086 bt_accept_enqueue(parent
, sk
);
2088 __l2cap_chan_add(conn
, chan
);
2092 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2094 chan
->ident
= cmd
->ident
;
2096 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2097 if (l2cap_check_security(chan
)) {
2098 if (bt_sk(sk
)->defer_setup
) {
2099 sk
->sk_state
= BT_CONNECT2
;
2100 result
= L2CAP_CR_PEND
;
2101 status
= L2CAP_CS_AUTHOR_PEND
;
2102 parent
->sk_data_ready(parent
, 0);
2104 sk
->sk_state
= BT_CONFIG
;
2105 result
= L2CAP_CR_SUCCESS
;
2106 status
= L2CAP_CS_NO_INFO
;
2109 sk
->sk_state
= BT_CONNECT2
;
2110 result
= L2CAP_CR_PEND
;
2111 status
= L2CAP_CS_AUTHEN_PEND
;
2114 sk
->sk_state
= BT_CONNECT2
;
2115 result
= L2CAP_CR_PEND
;
2116 status
= L2CAP_CS_NO_INFO
;
2119 write_unlock_bh(&conn
->chan_lock
);
2122 bh_unlock_sock(parent
);
2125 rsp
.scid
= cpu_to_le16(scid
);
2126 rsp
.dcid
= cpu_to_le16(dcid
);
2127 rsp
.result
= cpu_to_le16(result
);
2128 rsp
.status
= cpu_to_le16(status
);
2129 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2131 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2132 struct l2cap_info_req info
;
2133 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2135 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2136 conn
->info_ident
= l2cap_get_ident(conn
);
2138 mod_timer(&conn
->info_timer
, jiffies
+
2139 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2141 l2cap_send_cmd(conn
, conn
->info_ident
,
2142 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2145 if (chan
&& !(chan
->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2146 result
== L2CAP_CR_SUCCESS
) {
2148 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2149 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2150 l2cap_build_conf_req(chan
, buf
), buf
);
2151 chan
->num_conf_req
++;
2157 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2159 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2160 u16 scid
, dcid
, result
, status
;
2161 struct l2cap_chan
*chan
;
2165 scid
= __le16_to_cpu(rsp
->scid
);
2166 dcid
= __le16_to_cpu(rsp
->dcid
);
2167 result
= __le16_to_cpu(rsp
->result
);
2168 status
= __le16_to_cpu(rsp
->status
);
2170 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2173 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2177 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2185 case L2CAP_CR_SUCCESS
:
2186 sk
->sk_state
= BT_CONFIG
;
2189 chan
->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2191 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
2194 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2196 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2197 l2cap_build_conf_req(chan
, req
), req
);
2198 chan
->num_conf_req
++;
2202 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2206 /* don't delete l2cap channel if sk is owned by user */
2207 if (sock_owned_by_user(sk
)) {
2208 sk
->sk_state
= BT_DISCONN
;
2209 l2cap_sock_clear_timer(sk
);
2210 l2cap_sock_set_timer(sk
, HZ
/ 5);
2214 l2cap_chan_del(chan
, ECONNREFUSED
);
2222 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2224 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
2226 /* FCS is enabled only in ERTM or streaming mode, if one or both
2229 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2230 chan
->fcs
= L2CAP_FCS_NONE
;
2231 else if (!(pi
->chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2232 chan
->fcs
= L2CAP_FCS_CRC16
;
2235 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2237 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2240 struct l2cap_chan
*chan
;
2244 dcid
= __le16_to_cpu(req
->dcid
);
2245 flags
= __le16_to_cpu(req
->flags
);
2247 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2249 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2255 if (sk
->sk_state
!= BT_CONFIG
) {
2256 struct l2cap_cmd_rej rej
;
2258 rej
.reason
= cpu_to_le16(0x0002);
2259 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2264 /* Reject if config buffer is too small. */
2265 len
= cmd_len
- sizeof(*req
);
2266 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2267 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2268 l2cap_build_conf_rsp(chan
, rsp
,
2269 L2CAP_CONF_REJECT
, flags
), rsp
);
2274 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2275 chan
->conf_len
+= len
;
2277 if (flags
& 0x0001) {
2278 /* Incomplete config. Send empty response. */
2279 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2280 l2cap_build_conf_rsp(chan
, rsp
,
2281 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2285 /* Complete config. */
2286 len
= l2cap_parse_conf_req(chan
, rsp
);
2288 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2292 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2293 chan
->num_conf_rsp
++;
2295 /* Reset config buffer. */
2298 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2301 if (chan
->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2302 set_default_fcs(chan
);
2304 sk
->sk_state
= BT_CONNECTED
;
2306 chan
->next_tx_seq
= 0;
2307 chan
->expected_tx_seq
= 0;
2308 skb_queue_head_init(&chan
->tx_q
);
2309 if (chan
->mode
== L2CAP_MODE_ERTM
)
2310 l2cap_ertm_init(chan
);
2312 l2cap_chan_ready(sk
);
2316 if (!(chan
->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2318 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2319 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2320 l2cap_build_conf_req(chan
, buf
), buf
);
2321 chan
->num_conf_req
++;
2329 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2331 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2332 u16 scid
, flags
, result
;
2333 struct l2cap_chan
*chan
;
2335 int len
= cmd
->len
- sizeof(*rsp
);
2337 scid
= __le16_to_cpu(rsp
->scid
);
2338 flags
= __le16_to_cpu(rsp
->flags
);
2339 result
= __le16_to_cpu(rsp
->result
);
2341 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2342 scid
, flags
, result
);
2344 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2351 case L2CAP_CONF_SUCCESS
:
2352 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2355 case L2CAP_CONF_UNACCEPT
:
2356 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2359 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2360 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2364 /* throw out any old stored conf requests */
2365 result
= L2CAP_CONF_SUCCESS
;
2366 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2369 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2373 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2374 L2CAP_CONF_REQ
, len
, req
);
2375 chan
->num_conf_req
++;
2376 if (result
!= L2CAP_CONF_SUCCESS
)
2382 sk
->sk_err
= ECONNRESET
;
2383 l2cap_sock_set_timer(sk
, HZ
* 5);
2384 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2391 chan
->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2393 if (chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2394 set_default_fcs(chan
);
2396 sk
->sk_state
= BT_CONNECTED
;
2397 chan
->next_tx_seq
= 0;
2398 chan
->expected_tx_seq
= 0;
2399 skb_queue_head_init(&chan
->tx_q
);
2400 if (chan
->mode
== L2CAP_MODE_ERTM
)
2401 l2cap_ertm_init(chan
);
2403 l2cap_chan_ready(sk
);
2411 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2413 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2414 struct l2cap_disconn_rsp rsp
;
2416 struct l2cap_chan
*chan
;
2419 scid
= __le16_to_cpu(req
->scid
);
2420 dcid
= __le16_to_cpu(req
->dcid
);
2422 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2424 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2430 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2431 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2432 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2434 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2436 /* don't delete l2cap channel if sk is owned by user */
2437 if (sock_owned_by_user(sk
)) {
2438 sk
->sk_state
= BT_DISCONN
;
2439 l2cap_sock_clear_timer(sk
);
2440 l2cap_sock_set_timer(sk
, HZ
/ 5);
2445 l2cap_chan_del(chan
, ECONNRESET
);
2448 l2cap_sock_kill(sk
);
2452 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2454 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2456 struct l2cap_chan
*chan
;
2459 scid
= __le16_to_cpu(rsp
->scid
);
2460 dcid
= __le16_to_cpu(rsp
->dcid
);
2462 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2464 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2470 /* don't delete l2cap channel if sk is owned by user */
2471 if (sock_owned_by_user(sk
)) {
2472 sk
->sk_state
= BT_DISCONN
;
2473 l2cap_sock_clear_timer(sk
);
2474 l2cap_sock_set_timer(sk
, HZ
/ 5);
2479 l2cap_chan_del(chan
, 0);
2482 l2cap_sock_kill(sk
);
2486 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2488 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2491 type
= __le16_to_cpu(req
->type
);
2493 BT_DBG("type 0x%4.4x", type
);
2495 if (type
== L2CAP_IT_FEAT_MASK
) {
2497 u32 feat_mask
= l2cap_feat_mask
;
2498 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2499 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2500 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2502 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2504 put_unaligned_le32(feat_mask
, rsp
->data
);
2505 l2cap_send_cmd(conn
, cmd
->ident
,
2506 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2507 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2509 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2510 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2511 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2512 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2513 l2cap_send_cmd(conn
, cmd
->ident
,
2514 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2516 struct l2cap_info_rsp rsp
;
2517 rsp
.type
= cpu_to_le16(type
);
2518 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2519 l2cap_send_cmd(conn
, cmd
->ident
,
2520 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2526 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2528 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2531 type
= __le16_to_cpu(rsp
->type
);
2532 result
= __le16_to_cpu(rsp
->result
);
2534 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2536 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2537 if (cmd
->ident
!= conn
->info_ident
||
2538 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2541 del_timer(&conn
->info_timer
);
2543 if (result
!= L2CAP_IR_SUCCESS
) {
2544 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2545 conn
->info_ident
= 0;
2547 l2cap_conn_start(conn
);
2552 if (type
== L2CAP_IT_FEAT_MASK
) {
2553 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2555 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2556 struct l2cap_info_req req
;
2557 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2559 conn
->info_ident
= l2cap_get_ident(conn
);
2561 l2cap_send_cmd(conn
, conn
->info_ident
,
2562 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2564 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2565 conn
->info_ident
= 0;
2567 l2cap_conn_start(conn
);
2569 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2570 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2571 conn
->info_ident
= 0;
2573 l2cap_conn_start(conn
);
2579 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2584 if (min
> max
|| min
< 6 || max
> 3200)
2587 if (to_multiplier
< 10 || to_multiplier
> 3200)
2590 if (max
>= to_multiplier
* 8)
2593 max_latency
= (to_multiplier
* 8 / max
) - 1;
2594 if (latency
> 499 || latency
> max_latency
)
2600 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2601 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2603 struct hci_conn
*hcon
= conn
->hcon
;
2604 struct l2cap_conn_param_update_req
*req
;
2605 struct l2cap_conn_param_update_rsp rsp
;
2606 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2609 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2612 cmd_len
= __le16_to_cpu(cmd
->len
);
2613 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2616 req
= (struct l2cap_conn_param_update_req
*) data
;
2617 min
= __le16_to_cpu(req
->min
);
2618 max
= __le16_to_cpu(req
->max
);
2619 latency
= __le16_to_cpu(req
->latency
);
2620 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2622 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2623 min
, max
, latency
, to_multiplier
);
2625 memset(&rsp
, 0, sizeof(rsp
));
2627 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2629 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2631 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2633 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2637 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2642 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2643 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2647 switch (cmd
->code
) {
2648 case L2CAP_COMMAND_REJ
:
2649 l2cap_command_rej(conn
, cmd
, data
);
2652 case L2CAP_CONN_REQ
:
2653 err
= l2cap_connect_req(conn
, cmd
, data
);
2656 case L2CAP_CONN_RSP
:
2657 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2660 case L2CAP_CONF_REQ
:
2661 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2664 case L2CAP_CONF_RSP
:
2665 err
= l2cap_config_rsp(conn
, cmd
, data
);
2668 case L2CAP_DISCONN_REQ
:
2669 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2672 case L2CAP_DISCONN_RSP
:
2673 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2676 case L2CAP_ECHO_REQ
:
2677 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2680 case L2CAP_ECHO_RSP
:
2683 case L2CAP_INFO_REQ
:
2684 err
= l2cap_information_req(conn
, cmd
, data
);
2687 case L2CAP_INFO_RSP
:
2688 err
= l2cap_information_rsp(conn
, cmd
, data
);
2692 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2700 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2701 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2703 switch (cmd
->code
) {
2704 case L2CAP_COMMAND_REJ
:
2707 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2708 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2710 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2714 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2719 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2720 struct sk_buff
*skb
)
2722 u8
*data
= skb
->data
;
2724 struct l2cap_cmd_hdr cmd
;
2727 l2cap_raw_recv(conn
, skb
);
2729 while (len
>= L2CAP_CMD_HDR_SIZE
) {
2731 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
2732 data
+= L2CAP_CMD_HDR_SIZE
;
2733 len
-= L2CAP_CMD_HDR_SIZE
;
2735 cmd_len
= le16_to_cpu(cmd
.len
);
2737 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
2739 if (cmd_len
> len
|| !cmd
.ident
) {
2740 BT_DBG("corrupted command");
2744 if (conn
->hcon
->type
== LE_LINK
)
2745 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
2747 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
2750 struct l2cap_cmd_rej rej
;
2752 BT_ERR("Wrong link type (%d)", err
);
2754 /* FIXME: Map err to a valid reason */
2755 rej
.reason
= cpu_to_le16(0);
2756 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
2766 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
2768 u16 our_fcs
, rcv_fcs
;
2769 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
2771 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2772 skb_trim(skb
, skb
->len
- 2);
2773 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
2774 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
2776 if (our_fcs
!= rcv_fcs
)
2782 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
2786 chan
->frames_sent
= 0;
2788 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2790 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2791 control
|= L2CAP_SUPER_RCV_NOT_READY
;
2792 l2cap_send_sframe(chan
, control
);
2793 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
2796 if (chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
2797 l2cap_retransmit_frames(chan
);
2799 l2cap_ertm_send(chan
);
2801 if (!(chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
2802 chan
->frames_sent
== 0) {
2803 control
|= L2CAP_SUPER_RCV_READY
;
2804 l2cap_send_sframe(chan
, control
);
2808 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
2810 struct sk_buff
*next_skb
;
2811 int tx_seq_offset
, next_tx_seq_offset
;
2813 bt_cb(skb
)->tx_seq
= tx_seq
;
2814 bt_cb(skb
)->sar
= sar
;
2816 next_skb
= skb_peek(&chan
->srej_q
);
2818 __skb_queue_tail(&chan
->srej_q
, skb
);
2822 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
2823 if (tx_seq_offset
< 0)
2824 tx_seq_offset
+= 64;
2827 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
2830 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
2831 chan
->buffer_seq
) % 64;
2832 if (next_tx_seq_offset
< 0)
2833 next_tx_seq_offset
+= 64;
2835 if (next_tx_seq_offset
> tx_seq_offset
) {
2836 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
2840 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
2843 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
2845 __skb_queue_tail(&chan
->srej_q
, skb
);
2850 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
2852 struct sk_buff
*_skb
;
2855 switch (control
& L2CAP_CTRL_SAR
) {
2856 case L2CAP_SDU_UNSEGMENTED
:
2857 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
2860 err
= sock_queue_rcv_skb(chan
->sk
, skb
);
2866 case L2CAP_SDU_START
:
2867 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
2870 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
2872 if (chan
->sdu_len
> chan
->imtu
)
2875 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
2879 /* pull sdu_len bytes only after alloc, because of Local Busy
2880 * condition we have to be sure that this will be executed
2881 * only once, i.e., when alloc does not fail */
2884 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2886 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
2887 chan
->partial_sdu_len
= skb
->len
;
2890 case L2CAP_SDU_CONTINUE
:
2891 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
2897 chan
->partial_sdu_len
+= skb
->len
;
2898 if (chan
->partial_sdu_len
> chan
->sdu_len
)
2901 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2906 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
2912 if (!(chan
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
2913 chan
->partial_sdu_len
+= skb
->len
;
2915 if (chan
->partial_sdu_len
> chan
->imtu
)
2918 if (chan
->partial_sdu_len
!= chan
->sdu_len
)
2921 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2924 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
2926 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2930 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
2933 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2937 chan
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
2938 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
2940 kfree_skb(chan
->sdu
);
2948 kfree_skb(chan
->sdu
);
2952 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
2957 static int l2cap_try_push_rx_skb(struct l2cap_chan
*chan
)
2959 struct sk_buff
*skb
;
2963 while ((skb
= skb_dequeue(&chan
->busy_q
))) {
2964 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
2965 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
2967 skb_queue_head(&chan
->busy_q
, skb
);
2971 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
2974 if (!(chan
->conn_state
& L2CAP_CONN_RNR_SENT
))
2977 control
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2978 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
2979 l2cap_send_sframe(chan
, control
);
2980 chan
->retry_count
= 1;
2982 del_timer(&chan
->retrans_timer
);
2983 __mod_monitor_timer();
2985 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
2988 chan
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
2989 chan
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
2991 BT_DBG("chan %p, Exit local busy", chan
);
2996 static void l2cap_busy_work(struct work_struct
*work
)
2998 DECLARE_WAITQUEUE(wait
, current
);
2999 struct l2cap_chan
*chan
=
3000 container_of(work
, struct l2cap_chan
, busy_work
);
3001 struct sock
*sk
= chan
->sk
;
3002 int n_tries
= 0, timeo
= HZ
/5, err
;
3003 struct sk_buff
*skb
;
3007 add_wait_queue(sk_sleep(sk
), &wait
);
3008 while ((skb
= skb_peek(&chan
->busy_q
))) {
3009 set_current_state(TASK_INTERRUPTIBLE
);
3011 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3013 l2cap_send_disconn_req(chan
->conn
, chan
, EBUSY
);
3020 if (signal_pending(current
)) {
3021 err
= sock_intr_errno(timeo
);
3026 timeo
= schedule_timeout(timeo
);
3029 err
= sock_error(sk
);
3033 if (l2cap_try_push_rx_skb(chan
) == 0)
3037 set_current_state(TASK_RUNNING
);
3038 remove_wait_queue(sk_sleep(sk
), &wait
);
3043 static int l2cap_push_rx_skb(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3047 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3048 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3049 __skb_queue_tail(&chan
->busy_q
, skb
);
3050 return l2cap_try_push_rx_skb(chan
);
3055 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3057 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3061 /* Busy Condition */
3062 BT_DBG("chan %p, Enter local busy", chan
);
3064 chan
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3065 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3066 __skb_queue_tail(&chan
->busy_q
, skb
);
3068 sctrl
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3069 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3070 l2cap_send_sframe(chan
, sctrl
);
3072 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3074 del_timer(&chan
->ack_timer
);
3076 queue_work(_busy_wq
, &chan
->busy_work
);
3081 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3083 struct sk_buff
*_skb
;
3087 * TODO: We have to notify the userland if some data is lost with the
3091 switch (control
& L2CAP_CTRL_SAR
) {
3092 case L2CAP_SDU_UNSEGMENTED
:
3093 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3094 kfree_skb(chan
->sdu
);
3098 err
= sock_queue_rcv_skb(chan
->sk
, skb
);
3104 case L2CAP_SDU_START
:
3105 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3106 kfree_skb(chan
->sdu
);
3110 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3113 if (chan
->sdu_len
> chan
->imtu
) {
3118 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3124 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3126 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3127 chan
->partial_sdu_len
= skb
->len
;
3131 case L2CAP_SDU_CONTINUE
:
3132 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3135 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3137 chan
->partial_sdu_len
+= skb
->len
;
3138 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3139 kfree_skb(chan
->sdu
);
3146 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3149 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3151 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3152 chan
->partial_sdu_len
+= skb
->len
;
3154 if (chan
->partial_sdu_len
> chan
->imtu
)
3157 if (chan
->partial_sdu_len
== chan
->sdu_len
) {
3158 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3159 err
= sock_queue_rcv_skb(chan
->sk
, _skb
);
3166 kfree_skb(chan
->sdu
);
3174 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u8 tx_seq
)
3176 struct sk_buff
*skb
;
3179 while ((skb
= skb_peek(&chan
->srej_q
))) {
3180 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3183 skb
= skb_dequeue(&chan
->srej_q
);
3184 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3185 l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3186 chan
->buffer_seq_srej
=
3187 (chan
->buffer_seq_srej
+ 1) % 64;
3188 tx_seq
= (tx_seq
+ 1) % 64;
3192 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3194 struct srej_list
*l
, *tmp
;
3197 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3198 if (l
->tx_seq
== tx_seq
) {
3203 control
= L2CAP_SUPER_SELECT_REJECT
;
3204 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3205 l2cap_send_sframe(chan
, control
);
3207 list_add_tail(&l
->list
, &chan
->srej_l
);
3211 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3213 struct srej_list
*new;
3216 while (tx_seq
!= chan
->expected_tx_seq
) {
3217 control
= L2CAP_SUPER_SELECT_REJECT
;
3218 control
|= chan
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3219 l2cap_send_sframe(chan
, control
);
3221 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3222 new->tx_seq
= chan
->expected_tx_seq
;
3223 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3224 list_add_tail(&new->list
, &chan
->srej_l
);
3226 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3229 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3231 u8 tx_seq
= __get_txseq(rx_control
);
3232 u8 req_seq
= __get_reqseq(rx_control
);
3233 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3234 int tx_seq_offset
, expected_tx_seq_offset
;
3235 int num_to_ack
= (chan
->tx_win
/6) + 1;
3238 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3239 tx_seq
, rx_control
);
3241 if (L2CAP_CTRL_FINAL
& rx_control
&&
3242 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3243 del_timer(&chan
->monitor_timer
);
3244 if (chan
->unacked_frames
> 0)
3245 __mod_retrans_timer();
3246 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3249 chan
->expected_ack_seq
= req_seq
;
3250 l2cap_drop_acked_frames(chan
);
3252 if (tx_seq
== chan
->expected_tx_seq
)
3255 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3256 if (tx_seq_offset
< 0)
3257 tx_seq_offset
+= 64;
3259 /* invalid tx_seq */
3260 if (tx_seq_offset
>= chan
->tx_win
) {
3261 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3265 if (chan
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3268 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3269 struct srej_list
*first
;
3271 first
= list_first_entry(&chan
->srej_l
,
3272 struct srej_list
, list
);
3273 if (tx_seq
== first
->tx_seq
) {
3274 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3275 l2cap_check_srej_gap(chan
, tx_seq
);
3277 list_del(&first
->list
);
3280 if (list_empty(&chan
->srej_l
)) {
3281 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3282 chan
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3283 l2cap_send_ack(chan
);
3284 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3287 struct srej_list
*l
;
3289 /* duplicated tx_seq */
3290 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3293 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3294 if (l
->tx_seq
== tx_seq
) {
3295 l2cap_resend_srejframe(chan
, tx_seq
);
3299 l2cap_send_srejframe(chan
, tx_seq
);
3302 expected_tx_seq_offset
=
3303 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3304 if (expected_tx_seq_offset
< 0)
3305 expected_tx_seq_offset
+= 64;
3307 /* duplicated tx_seq */
3308 if (tx_seq_offset
< expected_tx_seq_offset
)
3311 chan
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3313 BT_DBG("chan %p, Enter SREJ", chan
);
3315 INIT_LIST_HEAD(&chan
->srej_l
);
3316 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3318 __skb_queue_head_init(&chan
->srej_q
);
3319 __skb_queue_head_init(&chan
->busy_q
);
3320 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3322 chan
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3324 l2cap_send_srejframe(chan
, tx_seq
);
3326 del_timer(&chan
->ack_timer
);
3331 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3333 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3334 bt_cb(skb
)->tx_seq
= tx_seq
;
3335 bt_cb(skb
)->sar
= sar
;
3336 __skb_queue_tail(&chan
->srej_q
, skb
);
3340 err
= l2cap_push_rx_skb(chan
, skb
, rx_control
);
3344 if (rx_control
& L2CAP_CTRL_FINAL
) {
3345 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3346 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3348 l2cap_retransmit_frames(chan
);
3353 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3354 if (chan
->num_acked
== num_to_ack
- 1)
3355 l2cap_send_ack(chan
);
3364 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3366 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, __get_reqseq(rx_control
),
3369 chan
->expected_ack_seq
= __get_reqseq(rx_control
);
3370 l2cap_drop_acked_frames(chan
);
3372 if (rx_control
& L2CAP_CTRL_POLL
) {
3373 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3374 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3375 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3376 (chan
->unacked_frames
> 0))
3377 __mod_retrans_timer();
3379 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3380 l2cap_send_srejtail(chan
);
3382 l2cap_send_i_or_rr_or_rnr(chan
);
3385 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3386 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3388 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3389 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3391 l2cap_retransmit_frames(chan
);
3394 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3395 (chan
->unacked_frames
> 0))
3396 __mod_retrans_timer();
3398 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3399 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3400 l2cap_send_ack(chan
);
3402 l2cap_ertm_send(chan
);
3406 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3408 u8 tx_seq
= __get_reqseq(rx_control
);
3410 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3412 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3414 chan
->expected_ack_seq
= tx_seq
;
3415 l2cap_drop_acked_frames(chan
);
3417 if (rx_control
& L2CAP_CTRL_FINAL
) {
3418 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3419 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3421 l2cap_retransmit_frames(chan
);
3423 l2cap_retransmit_frames(chan
);
3425 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
)
3426 chan
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3429 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3431 u8 tx_seq
= __get_reqseq(rx_control
);
3433 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3435 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3437 if (rx_control
& L2CAP_CTRL_POLL
) {
3438 chan
->expected_ack_seq
= tx_seq
;
3439 l2cap_drop_acked_frames(chan
);
3441 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3442 l2cap_retransmit_one_frame(chan
, tx_seq
);
3444 l2cap_ertm_send(chan
);
3446 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3447 chan
->srej_save_reqseq
= tx_seq
;
3448 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3450 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3451 if ((chan
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3452 chan
->srej_save_reqseq
== tx_seq
)
3453 chan
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3455 l2cap_retransmit_one_frame(chan
, tx_seq
);
3457 l2cap_retransmit_one_frame(chan
, tx_seq
);
3458 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3459 chan
->srej_save_reqseq
= tx_seq
;
3460 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3465 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3467 u8 tx_seq
= __get_reqseq(rx_control
);
3469 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3471 chan
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3472 chan
->expected_ack_seq
= tx_seq
;
3473 l2cap_drop_acked_frames(chan
);
3475 if (rx_control
& L2CAP_CTRL_POLL
)
3476 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3478 if (!(chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3479 del_timer(&chan
->retrans_timer
);
3480 if (rx_control
& L2CAP_CTRL_POLL
)
3481 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3485 if (rx_control
& L2CAP_CTRL_POLL
)
3486 l2cap_send_srejtail(chan
);
3488 l2cap_send_sframe(chan
, L2CAP_SUPER_RCV_READY
);
3491 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3493 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3495 if (L2CAP_CTRL_FINAL
& rx_control
&&
3496 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3497 del_timer(&chan
->monitor_timer
);
3498 if (chan
->unacked_frames
> 0)
3499 __mod_retrans_timer();
3500 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3503 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3504 case L2CAP_SUPER_RCV_READY
:
3505 l2cap_data_channel_rrframe(chan
, rx_control
);
3508 case L2CAP_SUPER_REJECT
:
3509 l2cap_data_channel_rejframe(chan
, rx_control
);
3512 case L2CAP_SUPER_SELECT_REJECT
:
3513 l2cap_data_channel_srejframe(chan
, rx_control
);
3516 case L2CAP_SUPER_RCV_NOT_READY
:
3517 l2cap_data_channel_rnrframe(chan
, rx_control
);
3525 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3527 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3530 int len
, next_tx_seq_offset
, req_seq_offset
;
3532 control
= get_unaligned_le16(skb
->data
);
3537 * We can just drop the corrupted I-frame here.
3538 * Receiver will miss it and start proper recovery
3539 * procedures and ask retransmission.
3541 if (l2cap_check_fcs(chan
, skb
))
3544 if (__is_sar_start(control
) && __is_iframe(control
))
3547 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3550 if (len
> chan
->mps
) {
3551 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3555 req_seq
= __get_reqseq(control
);
3556 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3557 if (req_seq_offset
< 0)
3558 req_seq_offset
+= 64;
3560 next_tx_seq_offset
=
3561 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3562 if (next_tx_seq_offset
< 0)
3563 next_tx_seq_offset
+= 64;
3565 /* check for invalid req-seq */
3566 if (req_seq_offset
> next_tx_seq_offset
) {
3567 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3571 if (__is_iframe(control
)) {
3573 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3577 l2cap_data_channel_iframe(chan
, control
, skb
);
3581 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3585 l2cap_data_channel_sframe(chan
, control
, skb
);
3595 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3597 struct l2cap_chan
*chan
;
3598 struct sock
*sk
= NULL
;
3599 struct l2cap_pinfo
*pi
;
3604 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3606 BT_DBG("unknown cid 0x%4.4x", cid
);
3613 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3615 if (sk
->sk_state
!= BT_CONNECTED
)
3618 switch (chan
->mode
) {
3619 case L2CAP_MODE_BASIC
:
3620 /* If socket recv buffers overflows we drop data here
3621 * which is *bad* because L2CAP has to be reliable.
3622 * But we don't have any other choice. L2CAP doesn't
3623 * provide flow control mechanism. */
3625 if (chan
->imtu
< skb
->len
)
3628 if (!sock_queue_rcv_skb(sk
, skb
))
3632 case L2CAP_MODE_ERTM
:
3633 if (!sock_owned_by_user(sk
)) {
3634 l2cap_ertm_data_rcv(sk
, skb
);
3636 if (sk_add_backlog(sk
, skb
))
3642 case L2CAP_MODE_STREAMING
:
3643 control
= get_unaligned_le16(skb
->data
);
3647 if (l2cap_check_fcs(chan
, skb
))
3650 if (__is_sar_start(control
))
3653 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3656 if (len
> chan
->mps
|| len
< 0 || __is_sframe(control
))
3659 tx_seq
= __get_txseq(control
);
3661 if (chan
->expected_tx_seq
== tx_seq
)
3662 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3664 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3666 l2cap_streaming_reassembly_sdu(chan
, skb
, control
);
3671 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
3685 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3689 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3695 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3697 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3700 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
3703 if (!sock_queue_rcv_skb(sk
, skb
))
3715 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3719 sk
= l2cap_get_sock_by_scid(0, cid
, conn
->src
);
3725 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3727 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3730 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
3733 if (!sock_queue_rcv_skb(sk
, skb
))
3745 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3747 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3751 skb_pull(skb
, L2CAP_HDR_SIZE
);
3752 cid
= __le16_to_cpu(lh
->cid
);
3753 len
= __le16_to_cpu(lh
->len
);
3755 if (len
!= skb
->len
) {
3760 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3763 case L2CAP_CID_LE_SIGNALING
:
3764 case L2CAP_CID_SIGNALING
:
3765 l2cap_sig_channel(conn
, skb
);
3768 case L2CAP_CID_CONN_LESS
:
3769 psm
= get_unaligned_le16(skb
->data
);
3771 l2cap_conless_channel(conn
, psm
, skb
);
3774 case L2CAP_CID_LE_DATA
:
3775 l2cap_att_channel(conn
, cid
, skb
);
3779 l2cap_data_channel(conn
, cid
, skb
);
3784 /* ---- L2CAP interface with lower layer (HCI) ---- */
3786 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3788 int exact
= 0, lm1
= 0, lm2
= 0;
3789 register struct sock
*sk
;
3790 struct hlist_node
*node
;
3792 if (type
!= ACL_LINK
)
3795 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3797 /* Find listening sockets and check their link_mode */
3798 read_lock(&l2cap_sk_list
.lock
);
3799 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3800 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3802 if (sk
->sk_state
!= BT_LISTEN
)
3805 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3806 lm1
|= HCI_LM_ACCEPT
;
3807 if (chan
->role_switch
)
3808 lm1
|= HCI_LM_MASTER
;
3810 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3811 lm2
|= HCI_LM_ACCEPT
;
3812 if (chan
->role_switch
)
3813 lm2
|= HCI_LM_MASTER
;
3816 read_unlock(&l2cap_sk_list
.lock
);
3818 return exact
? lm1
: lm2
;
3821 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3823 struct l2cap_conn
*conn
;
3825 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3827 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3831 conn
= l2cap_conn_add(hcon
, status
);
3833 l2cap_conn_ready(conn
);
3835 l2cap_conn_del(hcon
, bt_err(status
));
3840 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3842 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3844 BT_DBG("hcon %p", hcon
);
3846 if (hcon
->type
!= ACL_LINK
|| !conn
)
3849 return conn
->disc_reason
;
3852 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3854 BT_DBG("hcon %p reason %d", hcon
, reason
);
3856 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3859 l2cap_conn_del(hcon
, bt_err(reason
));
3864 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
3866 struct sock
*sk
= chan
->sk
;
3868 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
3871 if (encrypt
== 0x00) {
3872 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
3873 l2cap_sock_clear_timer(sk
);
3874 l2cap_sock_set_timer(sk
, HZ
* 5);
3875 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
3876 __l2cap_sock_close(sk
, ECONNREFUSED
);
3878 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
3879 l2cap_sock_clear_timer(sk
);
3883 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3885 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3886 struct l2cap_chan
*chan
;
3891 BT_DBG("conn %p", conn
);
3893 read_lock(&conn
->chan_lock
);
3895 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
3896 struct sock
*sk
= chan
->sk
;
3900 if (chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3905 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3906 sk
->sk_state
== BT_CONFIG
)) {
3907 l2cap_check_encryption(chan
, encrypt
);
3912 if (sk
->sk_state
== BT_CONNECT
) {
3914 struct l2cap_conn_req req
;
3915 req
.scid
= cpu_to_le16(chan
->scid
);
3916 req
.psm
= chan
->psm
;
3918 chan
->ident
= l2cap_get_ident(conn
);
3919 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3921 l2cap_send_cmd(conn
, chan
->ident
,
3922 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3924 l2cap_sock_clear_timer(sk
);
3925 l2cap_sock_set_timer(sk
, HZ
/ 10);
3927 } else if (sk
->sk_state
== BT_CONNECT2
) {
3928 struct l2cap_conn_rsp rsp
;
3932 sk
->sk_state
= BT_CONFIG
;
3933 result
= L2CAP_CR_SUCCESS
;
3935 sk
->sk_state
= BT_DISCONN
;
3936 l2cap_sock_set_timer(sk
, HZ
/ 10);
3937 result
= L2CAP_CR_SEC_BLOCK
;
3940 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3941 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3942 rsp
.result
= cpu_to_le16(result
);
3943 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3944 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
3951 read_unlock(&conn
->chan_lock
);
3956 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3958 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3961 conn
= l2cap_conn_add(hcon
, 0);
3966 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3968 if (!(flags
& ACL_CONT
)) {
3969 struct l2cap_hdr
*hdr
;
3970 struct l2cap_chan
*chan
;
3975 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3976 kfree_skb(conn
->rx_skb
);
3977 conn
->rx_skb
= NULL
;
3979 l2cap_conn_unreliable(conn
, ECOMM
);
3982 /* Start fragment always begin with Basic L2CAP header */
3983 if (skb
->len
< L2CAP_HDR_SIZE
) {
3984 BT_ERR("Frame is too short (len %d)", skb
->len
);
3985 l2cap_conn_unreliable(conn
, ECOMM
);
3989 hdr
= (struct l2cap_hdr
*) skb
->data
;
3990 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3991 cid
= __le16_to_cpu(hdr
->cid
);
3993 if (len
== skb
->len
) {
3994 /* Complete frame received */
3995 l2cap_recv_frame(conn
, skb
);
3999 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4001 if (skb
->len
> len
) {
4002 BT_ERR("Frame is too long (len %d, expected len %d)",
4004 l2cap_conn_unreliable(conn
, ECOMM
);
4008 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4010 if (chan
&& chan
->sk
) {
4011 struct sock
*sk
= chan
->sk
;
4013 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4014 BT_ERR("Frame exceeding recv MTU (len %d, "
4018 l2cap_conn_unreliable(conn
, ECOMM
);
4024 /* Allocate skb for the complete frame (with header) */
4025 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4029 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4031 conn
->rx_len
= len
- skb
->len
;
4033 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4035 if (!conn
->rx_len
) {
4036 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4037 l2cap_conn_unreliable(conn
, ECOMM
);
4041 if (skb
->len
> conn
->rx_len
) {
4042 BT_ERR("Fragment is too long (len %d, expected %d)",
4043 skb
->len
, conn
->rx_len
);
4044 kfree_skb(conn
->rx_skb
);
4045 conn
->rx_skb
= NULL
;
4047 l2cap_conn_unreliable(conn
, ECOMM
);
4051 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4053 conn
->rx_len
-= skb
->len
;
4055 if (!conn
->rx_len
) {
4056 /* Complete frame received */
4057 l2cap_recv_frame(conn
, conn
->rx_skb
);
4058 conn
->rx_skb
= NULL
;
4067 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4070 struct hlist_node
*node
;
4072 read_lock_bh(&l2cap_sk_list
.lock
);
4074 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4075 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4076 struct l2cap_chan
*chan
= pi
->chan
;
4078 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4079 batostr(&bt_sk(sk
)->src
),
4080 batostr(&bt_sk(sk
)->dst
),
4081 sk
->sk_state
, __le16_to_cpu(chan
->psm
),
4082 chan
->scid
, chan
->dcid
,
4083 chan
->imtu
, chan
->omtu
, chan
->sec_level
,
4087 read_unlock_bh(&l2cap_sk_list
.lock
);
4092 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4094 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4097 static const struct file_operations l2cap_debugfs_fops
= {
4098 .open
= l2cap_debugfs_open
,
4100 .llseek
= seq_lseek
,
4101 .release
= single_release
,
/* debugfs dentry for the "l2cap" file; created in l2cap_init() */
static struct dentry *l2cap_debugfs;
4106 static struct hci_proto l2cap_hci_proto
= {
4108 .id
= HCI_PROTO_L2CAP
,
4109 .connect_ind
= l2cap_connect_ind
,
4110 .connect_cfm
= l2cap_connect_cfm
,
4111 .disconn_ind
= l2cap_disconn_ind
,
4112 .disconn_cfm
= l2cap_disconn_cfm
,
4113 .security_cfm
= l2cap_security_cfm
,
4114 .recv_acldata
= l2cap_recv_acldata
4117 int __init
l2cap_init(void)
4121 err
= l2cap_init_sockets();
4125 _busy_wq
= create_singlethread_workqueue("l2cap");
4131 err
= hci_register_proto(&l2cap_hci_proto
);
4133 BT_ERR("L2CAP protocol registration failed");
4134 bt_sock_unregister(BTPROTO_L2CAP
);
4139 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4140 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4142 BT_ERR("Failed to create L2CAP debug file");
4148 destroy_workqueue(_busy_wq
);
4149 l2cap_cleanup_sockets();
4153 void l2cap_exit(void)
4155 debugfs_remove(l2cap_debugfs
);
4157 flush_workqueue(_busy_wq
);
4158 destroy_workqueue(_busy_wq
);
4160 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4161 BT_ERR("L2CAP protocol unregistration failed");
4163 l2cap_cleanup_sockets();
4166 module_param(disable_ertm
, bool, 0644);
4167 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");