2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
62 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
64 static LIST_HEAD(chan_list
);
65 static DEFINE_RWLOCK(chan_list_lock
);
67 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
68 u8 code
, u8 ident
, u16 dlen
, void *data
);
69 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
71 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
72 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
73 struct l2cap_chan
*chan
, int err
);
75 /* ---- L2CAP channels ---- */
77 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
81 list_for_each_entry(c
, &conn
->chan_l
, list
) {
88 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
92 list_for_each_entry(c
, &conn
->chan_l
, list
) {
99 /* Find channel with given SCID.
100 * Returns locked socket */
101 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
103 struct l2cap_chan
*c
;
105 mutex_lock(&conn
->chan_lock
);
106 c
= __l2cap_get_chan_by_scid(conn
, cid
);
107 mutex_unlock(&conn
->chan_lock
);
112 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
114 struct l2cap_chan
*c
;
116 list_for_each_entry(c
, &conn
->chan_l
, list
) {
117 if (c
->ident
== ident
)
123 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
125 struct l2cap_chan
*c
;
127 mutex_lock(&conn
->chan_lock
);
128 c
= __l2cap_get_chan_by_ident(conn
, ident
);
129 mutex_unlock(&conn
->chan_lock
);
134 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
136 struct l2cap_chan
*c
;
138 list_for_each_entry(c
, &chan_list
, global_l
) {
139 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
145 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
149 write_lock(&chan_list_lock
);
151 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
164 for (p
= 0x1001; p
< 0x1100; p
+= 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
166 chan
->psm
= cpu_to_le16(p
);
167 chan
->sport
= cpu_to_le16(p
);
174 write_unlock(&chan_list_lock
);
178 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
180 write_lock(&chan_list_lock
);
184 write_unlock(&chan_list_lock
);
189 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
191 u16 cid
= L2CAP_CID_DYN_START
;
193 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
194 if (!__l2cap_get_chan_by_scid(conn
, cid
))
201 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
203 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
204 state_to_string(state
));
207 chan
->ops
->state_change(chan
->data
, state
);
210 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
212 struct sock
*sk
= chan
->sk
;
215 __l2cap_state_change(chan
, state
);
219 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
221 struct sock
*sk
= chan
->sk
;
226 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
228 struct sock
*sk
= chan
->sk
;
231 __l2cap_chan_set_err(chan
, err
);
235 /* ---- L2CAP sequence number lists ---- */
237 /* For ERTM, ordered lists of sequence numbers must be tracked for
238 * SREJ requests that are received and for frames that are to be
239 * retransmitted. These seq_list functions implement a singly-linked
240 * list in an array, where membership in the list can also be checked
241 * in constant time. Items can also be added to the tail of the list
242 * and removed from the head in constant time, without further memory
246 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
248 size_t alloc_size
, i
;
250 /* Allocated size is a power of 2 to map sequence numbers
251 * (which may be up to 14 bits) in to a smaller array that is
252 * sized for the negotiated ERTM transmit windows.
254 alloc_size
= roundup_pow_of_two(size
);
256 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
260 seq_list
->mask
= alloc_size
- 1;
261 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
262 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
263 for (i
= 0; i
< alloc_size
; i
++)
264 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
269 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
271 kfree(seq_list
->list
);
274 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
277 /* Constant-time check for list membership */
278 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
281 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
283 u16 mask
= seq_list
->mask
;
285 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
286 /* In case someone tries to pop the head of an empty list */
287 return L2CAP_SEQ_LIST_CLEAR
;
288 } else if (seq_list
->head
== seq
) {
289 /* Head can be removed in constant time */
290 seq_list
->head
= seq_list
->list
[seq
& mask
];
291 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
294 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
295 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
298 /* Walk the list to find the sequence number */
299 u16 prev
= seq_list
->head
;
300 while (seq_list
->list
[prev
& mask
] != seq
) {
301 prev
= seq_list
->list
[prev
& mask
];
302 if (prev
== L2CAP_SEQ_LIST_TAIL
)
303 return L2CAP_SEQ_LIST_CLEAR
;
306 /* Unlink the number from the list and clear it */
307 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
308 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
309 if (seq_list
->tail
== seq
)
310 seq_list
->tail
= prev
;
315 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
317 /* Remove the head in constant time */
318 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
321 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
323 if (seq_list
->head
!= L2CAP_SEQ_LIST_CLEAR
) {
325 for (i
= 0; i
<= seq_list
->mask
; i
++)
326 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
328 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
329 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
333 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
335 u16 mask
= seq_list
->mask
;
337 /* All appends happen in constant time */
339 if (seq_list
->list
[seq
& mask
] == L2CAP_SEQ_LIST_CLEAR
) {
340 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
341 seq_list
->head
= seq
;
343 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
345 seq_list
->tail
= seq
;
346 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
350 static void l2cap_chan_timeout(struct work_struct
*work
)
352 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
354 struct l2cap_conn
*conn
= chan
->conn
;
357 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
359 mutex_lock(&conn
->chan_lock
);
360 l2cap_chan_lock(chan
);
362 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
363 reason
= ECONNREFUSED
;
364 else if (chan
->state
== BT_CONNECT
&&
365 chan
->sec_level
!= BT_SECURITY_SDP
)
366 reason
= ECONNREFUSED
;
370 l2cap_chan_close(chan
, reason
);
372 l2cap_chan_unlock(chan
);
374 chan
->ops
->close(chan
->data
);
375 mutex_unlock(&conn
->chan_lock
);
377 l2cap_chan_put(chan
);
380 struct l2cap_chan
*l2cap_chan_create(void)
382 struct l2cap_chan
*chan
;
384 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
388 mutex_init(&chan
->lock
);
390 write_lock(&chan_list_lock
);
391 list_add(&chan
->global_l
, &chan_list
);
392 write_unlock(&chan_list_lock
);
394 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
396 chan
->state
= BT_OPEN
;
398 atomic_set(&chan
->refcnt
, 1);
400 BT_DBG("chan %p", chan
);
405 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
407 write_lock(&chan_list_lock
);
408 list_del(&chan
->global_l
);
409 write_unlock(&chan_list_lock
);
411 l2cap_chan_put(chan
);
414 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
416 chan
->fcs
= L2CAP_FCS_CRC16
;
417 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
418 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
419 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
420 chan
->sec_level
= BT_SECURITY_LOW
;
422 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
425 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
427 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
428 __le16_to_cpu(chan
->psm
), chan
->dcid
);
430 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
434 switch (chan
->chan_type
) {
435 case L2CAP_CHAN_CONN_ORIENTED
:
436 if (conn
->hcon
->type
== LE_LINK
) {
438 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
439 chan
->scid
= L2CAP_CID_LE_DATA
;
440 chan
->dcid
= L2CAP_CID_LE_DATA
;
442 /* Alloc CID for connection-oriented socket */
443 chan
->scid
= l2cap_alloc_cid(conn
);
444 chan
->omtu
= L2CAP_DEFAULT_MTU
;
448 case L2CAP_CHAN_CONN_LESS
:
449 /* Connectionless socket */
450 chan
->scid
= L2CAP_CID_CONN_LESS
;
451 chan
->dcid
= L2CAP_CID_CONN_LESS
;
452 chan
->omtu
= L2CAP_DEFAULT_MTU
;
456 /* Raw socket can send/recv signalling messages only */
457 chan
->scid
= L2CAP_CID_SIGNALING
;
458 chan
->dcid
= L2CAP_CID_SIGNALING
;
459 chan
->omtu
= L2CAP_DEFAULT_MTU
;
462 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
463 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
464 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
465 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
466 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
467 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
469 l2cap_chan_hold(chan
);
471 list_add(&chan
->list
, &conn
->chan_l
);
474 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
476 mutex_lock(&conn
->chan_lock
);
477 __l2cap_chan_add(conn
, chan
);
478 mutex_unlock(&conn
->chan_lock
);
481 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
483 struct sock
*sk
= chan
->sk
;
484 struct l2cap_conn
*conn
= chan
->conn
;
485 struct sock
*parent
= bt_sk(sk
)->parent
;
487 __clear_chan_timer(chan
);
489 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
492 /* Delete from channel list */
493 list_del(&chan
->list
);
495 l2cap_chan_put(chan
);
498 hci_conn_put(conn
->hcon
);
503 __l2cap_state_change(chan
, BT_CLOSED
);
504 sock_set_flag(sk
, SOCK_ZAPPED
);
507 __l2cap_chan_set_err(chan
, err
);
510 bt_accept_unlink(sk
);
511 parent
->sk_data_ready(parent
, 0);
513 sk
->sk_state_change(sk
);
517 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
518 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
521 skb_queue_purge(&chan
->tx_q
);
523 if (chan
->mode
== L2CAP_MODE_ERTM
) {
524 struct srej_list
*l
, *tmp
;
526 __clear_retrans_timer(chan
);
527 __clear_monitor_timer(chan
);
528 __clear_ack_timer(chan
);
530 skb_queue_purge(&chan
->srej_q
);
532 l2cap_seq_list_free(&chan
->srej_list
);
533 l2cap_seq_list_free(&chan
->retrans_list
);
534 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
541 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
545 BT_DBG("parent %p", parent
);
547 /* Close not yet accepted channels */
548 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
549 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
551 l2cap_chan_lock(chan
);
552 __clear_chan_timer(chan
);
553 l2cap_chan_close(chan
, ECONNRESET
);
554 l2cap_chan_unlock(chan
);
556 chan
->ops
->close(chan
->data
);
560 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
562 struct l2cap_conn
*conn
= chan
->conn
;
563 struct sock
*sk
= chan
->sk
;
565 BT_DBG("chan %p state %s sk %p", chan
,
566 state_to_string(chan
->state
), sk
);
568 switch (chan
->state
) {
571 l2cap_chan_cleanup_listen(sk
);
573 __l2cap_state_change(chan
, BT_CLOSED
);
574 sock_set_flag(sk
, SOCK_ZAPPED
);
580 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
581 conn
->hcon
->type
== ACL_LINK
) {
582 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
583 l2cap_send_disconn_req(conn
, chan
, reason
);
585 l2cap_chan_del(chan
, reason
);
589 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
590 conn
->hcon
->type
== ACL_LINK
) {
591 struct l2cap_conn_rsp rsp
;
594 if (bt_sk(sk
)->defer_setup
)
595 result
= L2CAP_CR_SEC_BLOCK
;
597 result
= L2CAP_CR_BAD_PSM
;
598 l2cap_state_change(chan
, BT_DISCONN
);
600 rsp
.scid
= cpu_to_le16(chan
->dcid
);
601 rsp
.dcid
= cpu_to_le16(chan
->scid
);
602 rsp
.result
= cpu_to_le16(result
);
603 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
604 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
608 l2cap_chan_del(chan
, reason
);
613 l2cap_chan_del(chan
, reason
);
618 sock_set_flag(sk
, SOCK_ZAPPED
);
624 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
626 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
627 switch (chan
->sec_level
) {
628 case BT_SECURITY_HIGH
:
629 return HCI_AT_DEDICATED_BONDING_MITM
;
630 case BT_SECURITY_MEDIUM
:
631 return HCI_AT_DEDICATED_BONDING
;
633 return HCI_AT_NO_BONDING
;
635 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
636 if (chan
->sec_level
== BT_SECURITY_LOW
)
637 chan
->sec_level
= BT_SECURITY_SDP
;
639 if (chan
->sec_level
== BT_SECURITY_HIGH
)
640 return HCI_AT_NO_BONDING_MITM
;
642 return HCI_AT_NO_BONDING
;
644 switch (chan
->sec_level
) {
645 case BT_SECURITY_HIGH
:
646 return HCI_AT_GENERAL_BONDING_MITM
;
647 case BT_SECURITY_MEDIUM
:
648 return HCI_AT_GENERAL_BONDING
;
650 return HCI_AT_NO_BONDING
;
655 /* Service level security */
656 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
658 struct l2cap_conn
*conn
= chan
->conn
;
661 auth_type
= l2cap_get_auth_type(chan
);
663 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
666 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
670 /* Get next available identificator.
671 * 1 - 128 are used by kernel.
672 * 129 - 199 are reserved.
673 * 200 - 254 are used by utilities like l2ping, etc.
676 spin_lock(&conn
->lock
);
678 if (++conn
->tx_ident
> 128)
683 spin_unlock(&conn
->lock
);
688 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
690 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
693 BT_DBG("code 0x%2.2x", code
);
698 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
699 flags
= ACL_START_NO_FLUSH
;
703 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
704 skb
->priority
= HCI_PRIO_MAX
;
706 hci_send_acl(conn
->hchan
, skb
, flags
);
709 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
711 struct hci_conn
*hcon
= chan
->conn
->hcon
;
714 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
717 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
718 lmp_no_flush_capable(hcon
->hdev
))
719 flags
= ACL_START_NO_FLUSH
;
723 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
724 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
727 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
730 struct l2cap_hdr
*lh
;
731 struct l2cap_conn
*conn
= chan
->conn
;
734 if (chan
->state
!= BT_CONNECTED
)
737 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
738 hlen
= L2CAP_EXT_HDR_SIZE
;
740 hlen
= L2CAP_ENH_HDR_SIZE
;
742 if (chan
->fcs
== L2CAP_FCS_CRC16
)
743 hlen
+= L2CAP_FCS_SIZE
;
745 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
747 count
= min_t(unsigned int, conn
->mtu
, hlen
);
749 control
|= __set_sframe(chan
);
751 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
752 control
|= __set_ctrl_final(chan
);
754 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
755 control
|= __set_ctrl_poll(chan
);
757 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
761 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
762 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
763 lh
->cid
= cpu_to_le16(chan
->dcid
);
765 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
767 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
768 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
769 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
772 skb
->priority
= HCI_PRIO_MAX
;
773 l2cap_do_send(chan
, skb
);
776 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
778 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
779 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
780 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
782 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
784 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
786 l2cap_send_sframe(chan
, control
);
789 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
793 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
794 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
796 if (control
->sframe
) {
797 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
798 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
799 packed
|= L2CAP_CTRL_FRAME_TYPE
;
801 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
802 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
808 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
810 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
811 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
813 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
816 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
817 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
824 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
825 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
832 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
836 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
837 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
839 if (control
->sframe
) {
840 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
841 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
842 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
844 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
845 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
851 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
853 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
854 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
856 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
859 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
860 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
867 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
868 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
875 static inline void __unpack_control(struct l2cap_chan
*chan
,
878 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
879 __unpack_extended_control(get_unaligned_le32(skb
->data
),
880 &bt_cb(skb
)->control
);
882 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
883 &bt_cb(skb
)->control
);
887 static inline void __pack_control(struct l2cap_chan
*chan
,
888 struct l2cap_ctrl
*control
,
891 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
892 put_unaligned_le32(__pack_extended_control(control
),
893 skb
->data
+ L2CAP_HDR_SIZE
);
895 put_unaligned_le16(__pack_enhanced_control(control
),
896 skb
->data
+ L2CAP_HDR_SIZE
);
900 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
902 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
905 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
907 struct l2cap_conn
*conn
= chan
->conn
;
908 struct l2cap_conn_req req
;
910 req
.scid
= cpu_to_le16(chan
->scid
);
913 chan
->ident
= l2cap_get_ident(conn
);
915 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
917 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
920 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
922 struct sock
*sk
= chan
->sk
;
927 parent
= bt_sk(sk
)->parent
;
929 BT_DBG("sk %p, parent %p", sk
, parent
);
931 chan
->conf_state
= 0;
932 __clear_chan_timer(chan
);
934 __l2cap_state_change(chan
, BT_CONNECTED
);
935 sk
->sk_state_change(sk
);
938 parent
->sk_data_ready(parent
, 0);
943 static void l2cap_do_start(struct l2cap_chan
*chan
)
945 struct l2cap_conn
*conn
= chan
->conn
;
947 if (conn
->hcon
->type
== LE_LINK
) {
948 l2cap_chan_ready(chan
);
952 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
953 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
956 if (l2cap_chan_check_security(chan
) &&
957 __l2cap_no_conn_pending(chan
))
958 l2cap_send_conn_req(chan
);
960 struct l2cap_info_req req
;
961 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
963 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
964 conn
->info_ident
= l2cap_get_ident(conn
);
966 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
968 l2cap_send_cmd(conn
, conn
->info_ident
,
969 L2CAP_INFO_REQ
, sizeof(req
), &req
);
973 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
975 u32 local_feat_mask
= l2cap_feat_mask
;
977 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
980 case L2CAP_MODE_ERTM
:
981 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
982 case L2CAP_MODE_STREAMING
:
983 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
989 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
991 struct sock
*sk
= chan
->sk
;
992 struct l2cap_disconn_req req
;
997 if (chan
->mode
== L2CAP_MODE_ERTM
) {
998 __clear_retrans_timer(chan
);
999 __clear_monitor_timer(chan
);
1000 __clear_ack_timer(chan
);
1003 req
.dcid
= cpu_to_le16(chan
->dcid
);
1004 req
.scid
= cpu_to_le16(chan
->scid
);
1005 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1006 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1009 __l2cap_state_change(chan
, BT_DISCONN
);
1010 __l2cap_chan_set_err(chan
, err
);
1014 /* ---- L2CAP connections ---- */
1015 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1017 struct l2cap_chan
*chan
, *tmp
;
1019 BT_DBG("conn %p", conn
);
1021 mutex_lock(&conn
->chan_lock
);
1023 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1024 struct sock
*sk
= chan
->sk
;
1026 l2cap_chan_lock(chan
);
1028 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1029 l2cap_chan_unlock(chan
);
1033 if (chan
->state
== BT_CONNECT
) {
1034 if (!l2cap_chan_check_security(chan
) ||
1035 !__l2cap_no_conn_pending(chan
)) {
1036 l2cap_chan_unlock(chan
);
1040 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1041 && test_bit(CONF_STATE2_DEVICE
,
1042 &chan
->conf_state
)) {
1043 l2cap_chan_close(chan
, ECONNRESET
);
1044 l2cap_chan_unlock(chan
);
1048 l2cap_send_conn_req(chan
);
1050 } else if (chan
->state
== BT_CONNECT2
) {
1051 struct l2cap_conn_rsp rsp
;
1053 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1054 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1056 if (l2cap_chan_check_security(chan
)) {
1058 if (bt_sk(sk
)->defer_setup
) {
1059 struct sock
*parent
= bt_sk(sk
)->parent
;
1060 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1061 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1063 parent
->sk_data_ready(parent
, 0);
1066 __l2cap_state_change(chan
, BT_CONFIG
);
1067 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1068 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1072 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1073 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1076 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1079 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1080 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1081 l2cap_chan_unlock(chan
);
1085 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1086 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1087 l2cap_build_conf_req(chan
, buf
), buf
);
1088 chan
->num_conf_req
++;
1091 l2cap_chan_unlock(chan
);
1094 mutex_unlock(&conn
->chan_lock
);
1097 /* Find socket with cid and source/destination bdaddr.
1098 * Returns closest match, locked.
1100 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1104 struct l2cap_chan
*c
, *c1
= NULL
;
1106 read_lock(&chan_list_lock
);
1108 list_for_each_entry(c
, &chan_list
, global_l
) {
1109 struct sock
*sk
= c
->sk
;
1111 if (state
&& c
->state
!= state
)
1114 if (c
->scid
== cid
) {
1115 int src_match
, dst_match
;
1116 int src_any
, dst_any
;
1119 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1120 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1121 if (src_match
&& dst_match
) {
1122 read_unlock(&chan_list_lock
);
1127 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1128 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1129 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1130 (src_any
&& dst_any
))
1135 read_unlock(&chan_list_lock
);
1140 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1142 struct sock
*parent
, *sk
;
1143 struct l2cap_chan
*chan
, *pchan
;
1147 /* Check if we have socket listening on cid */
1148 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1149 conn
->src
, conn
->dst
);
1157 /* Check for backlog size */
1158 if (sk_acceptq_is_full(parent
)) {
1159 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1163 chan
= pchan
->ops
->new_connection(pchan
->data
);
1169 hci_conn_hold(conn
->hcon
);
1171 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1172 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1174 bt_accept_enqueue(parent
, sk
);
1176 l2cap_chan_add(conn
, chan
);
1178 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1180 __l2cap_state_change(chan
, BT_CONNECTED
);
1181 parent
->sk_data_ready(parent
, 0);
1184 release_sock(parent
);
1187 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1189 struct l2cap_chan
*chan
;
1191 BT_DBG("conn %p", conn
);
1193 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1194 l2cap_le_conn_ready(conn
);
1196 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1197 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1199 mutex_lock(&conn
->chan_lock
);
1201 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1203 l2cap_chan_lock(chan
);
1205 if (conn
->hcon
->type
== LE_LINK
) {
1206 if (smp_conn_security(conn
, chan
->sec_level
))
1207 l2cap_chan_ready(chan
);
1209 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1210 struct sock
*sk
= chan
->sk
;
1211 __clear_chan_timer(chan
);
1213 __l2cap_state_change(chan
, BT_CONNECTED
);
1214 sk
->sk_state_change(sk
);
1217 } else if (chan
->state
== BT_CONNECT
)
1218 l2cap_do_start(chan
);
1220 l2cap_chan_unlock(chan
);
1223 mutex_unlock(&conn
->chan_lock
);
1226 /* Notify sockets that we cannot guaranty reliability anymore */
1227 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1229 struct l2cap_chan
*chan
;
1231 BT_DBG("conn %p", conn
);
1233 mutex_lock(&conn
->chan_lock
);
1235 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1236 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1237 __l2cap_chan_set_err(chan
, err
);
1240 mutex_unlock(&conn
->chan_lock
);
1243 static void l2cap_info_timeout(struct work_struct
*work
)
1245 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1248 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1249 conn
->info_ident
= 0;
1251 l2cap_conn_start(conn
);
1254 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1256 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1257 struct l2cap_chan
*chan
, *l
;
1262 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1264 kfree_skb(conn
->rx_skb
);
1266 mutex_lock(&conn
->chan_lock
);
1269 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1270 l2cap_chan_lock(chan
);
1272 l2cap_chan_del(chan
, err
);
1274 l2cap_chan_unlock(chan
);
1276 chan
->ops
->close(chan
->data
);
1279 mutex_unlock(&conn
->chan_lock
);
1281 hci_chan_del(conn
->hchan
);
1283 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1284 cancel_delayed_work_sync(&conn
->info_timer
);
1286 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1287 cancel_delayed_work_sync(&conn
->security_timer
);
1288 smp_chan_destroy(conn
);
1291 hcon
->l2cap_data
= NULL
;
1295 static void security_timeout(struct work_struct
*work
)
1297 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1298 security_timer
.work
);
1300 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1303 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1305 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1306 struct hci_chan
*hchan
;
1311 hchan
= hci_chan_create(hcon
);
1315 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1317 hci_chan_del(hchan
);
1321 hcon
->l2cap_data
= conn
;
1323 conn
->hchan
= hchan
;
1325 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1327 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1328 conn
->mtu
= hcon
->hdev
->le_mtu
;
1330 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1332 conn
->src
= &hcon
->hdev
->bdaddr
;
1333 conn
->dst
= &hcon
->dst
;
1335 conn
->feat_mask
= 0;
1337 spin_lock_init(&conn
->lock
);
1338 mutex_init(&conn
->chan_lock
);
1340 INIT_LIST_HEAD(&conn
->chan_l
);
1342 if (hcon
->type
== LE_LINK
)
1343 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1345 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1347 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1352 /* ---- Socket interface ---- */
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact src+dst match wins immediately; otherwise
 * the best wildcard (BDADDR_ANY) match seen is remembered in c1.
 * NOTE(review): the trailing signature parameters (src/dst), 'continue',
 * 'return c;'/'c1 = c;' statements and closing braces were lost in
 * extraction. */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* Skip channels not in the requested state (when given). */
		if (state && c->state != state)

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match on both addresses — best possible. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);

			/* Remember any wildcard match as closest-so-far. */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))

	read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 * Validates the PSM/CID, resolves the local HCI device, creates the ACL or
 * LE link, binds the channel to the resulting l2cap_conn and starts the
 * channel state machine.
 * Returns 0 on success or a negative errno.
 * NOTE(review): extraction dropped many lines here — local declarations
 * (err, auth_type), hci_dev_lock(), error-path gotos, the switch-case
 * bodies for the state checks, and the final 'return err;'. The visible
 * tokens are kept unchanged. */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
			bdaddr_t *dst, u8 dst_type)
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
			dst_type, __le16_to_cpu(chan->psm));

	/* No route to the destination: nothing we can do. */
	hdev = hci_get_route(dst, src);
		return -EHOSTUNREACH;

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
			chan->chan_type != L2CAP_CHAN_RAW) {

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (sk->sk_state) {
		/* Already connecting */
		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels connect over LE; everything else over ACL
	 * (the 'else' before the second hci_connect was dropped). */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				chan->sec_level, auth_type);
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				chan->sec_level, auth_type);

	err = PTR_ERR(hcon);

	conn = l2cap_conn_add(hcon, 0);

	/* LE allows only one channel per link; refuse a second one. */
	if (hcon->type == LE_LINK) {
		if (!list_empty(&conn->chan_l)) {

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* chan_add takes conn->chan_lock, so drop the channel lock first
	 * to preserve lock ordering. */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
			/* (dropped 'else'): kick off config for
			 * connection-oriented channels */
			l2cap_do_start(chan);

	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged, or until a signal/socket error.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the declarations/initialisation of err and timeo, the
 * 'break' statements and 'return err;' were lost in extraction. */
int __l2cap_wait_ack(struct sock *sk)
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* A pending signal aborts the wait with EINTR/ERESTARTSYS. */
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

		/* Sleep until woken by an ack (or timeout). */
		timeo = schedule_timeout(timeo);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer stopped responding to our poll.  Either
 * give up (too many retries -> disconnect) or poll again with an RR/RNR
 * carrying the P-bit and re-arm the timer.
 * NOTE(review): the 'return' after the early l2cap_chan_put() was dropped
 * in extraction. */
static void l2cap_monitor_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Retry budget exhausted: abort the channel. */
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the work was queued. */
	l2cap_chan_put(chan);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Start the
 * poll sequence: reset the retry counter, arm the monitor timer, flag that
 * we are waiting for an F-bit and send an RR/RNR with the P-bit set. */
static void l2cap_retrans_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	chan->retry_count = 1;
	__set_monitor_timer(chan);

	/* Expect the peer's response to carry the Final bit. */
	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the work was queued. */
	l2cap_chan_put(chan);
/* Release transmitted I-frames from tx_q that the peer has acknowledged
 * (everything up to, but not including, expected_ack_seq).  Once nothing
 * is outstanding the retransmission timer is stopped.
 * NOTE(review): the 'break' on reaching expected_ack_seq and the
 * kfree_skb() of the dequeued buffer were lost in extraction. */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		/* First still-unacked frame reached: stop dropping. */
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)

		skb = skb_dequeue(&chan->tx_q);

		chan->unacked_frames--;

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each PDU's control field
 * with the next TxSeq and appending an FCS when CRC16 is configured.
 * Streaming mode never retransmits, so frames are dequeued outright.
 * NOTE(review): the declarations of control/fcs were lost in extraction. */
static void l2cap_streaming_send(struct l2cap_chan *chan)
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch TxSeq into the already-built control field just
		 * past the L2CAP basic header. */
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* CRC over everything except the FCS field itself,
			 * written into the last two bytes. */
			fcs = crc16(0, (u8 *)skb->data,
					skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					skb->data + skb->len - L2CAP_FCS_SIZE);

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq (SREJ
 * recovery).  The original skb stays on tx_q; a clone is sent with a
 * freshly rebuilt control field (F-bit if pending, current ReqSeq) and a
 * recomputed FCS.
 * NOTE(review): the declarations of control/fcs, the early 'return's and
 * several closing braces were lost in extraction. */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
	struct sk_buff *skb, *tx_skb;

	/* Walk tx_q looking for the frame with the requested TxSeq. */
	skb = skb_peek(&chan->tx_q);

	while (bt_cb(skb)->tx_seq != tx_seq) {
		if (skb_queue_is_last(&chan->tx_q, skb))

		skb = skb_queue_next(&chan->tx_q, skb);

	/* Per-frame retry budget exhausted: abort the channel. */
	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;

	/* Rebuild the control field, keeping only the SAR bits. */
	control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
	control &= __get_sar_mask(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	control |= __set_reqseq(chan, chan->buffer_seq);
	control |= __set_txseq(chan, tx_seq);

	__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Control field changed, so the FCS must be recomputed. */
		fcs = crc16(0, (u8 *)tx_skb->data,
				tx_skb->len - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs,
				tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);

	l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of queued I-frames from tx_send_head
 * while the transmit window has room, stamping TxSeq/ReqSeq/F-bit and FCS,
 * arming the retransmission timer and tracking unacked frames.
 * Returns the number of frames sent (per the visible frames_sent logic).
 * NOTE(review): the declarations of control/fcs/nsent, several early
 * 'return' statements, the 'else' keywords and the final return were lost
 * in extraction. */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;

	if (chan->state != BT_CONNECTED)

	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {

		/* Per-frame retry budget exhausted: abort the channel. */
		if (chan->remote_max_tx &&
				bt_cb(skb)->retries == chan->remote_max_tx) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);

		/* Keep the original on tx_q for retransmission; send a
		 * clone (which shares the same data buffer). */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		bt_cb(skb)->retries++;

		control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
		control &= __get_sar_mask(chan);

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control |= __set_ctrl_final(chan);

		control |= __set_reqseq(chan, chan->buffer_seq);
		control |= __set_txseq(chan, chan->next_tx_seq);

		__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* NOTE(review): mixes skb->data with tx_skb->len —
			 * presumably equivalent because skb_clone() shares
			 * the data buffer, but inconsistent; confirm against
			 * upstream, which uses tx_skb consistently. */
			fcs = crc16(0, (u8 *)skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb->data +
					tx_skb->len - L2CAP_FCS_SIZE);

		l2cap_do_send(chan, tx_skb);

		__set_retrans_timer(chan);

		bt_cb(skb)->tx_seq = chan->next_tx_seq;

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);

		/* First transmission of this frame: it is now unacked and
		 * pending acks are implicitly piggybacked. */
		if (bt_cb(skb)->retries == 1) {
			chan->unacked_frames++;

			__clear_ack_timer(chan);

		chan->frames_sent++;

		/* Advance tx_send_head; second assignment is the (dropped)
		 * 'else' branch. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* REJ recovery: rewind the send pointer to the start of tx_q, reset
 * next_tx_seq to the peer's expected sequence and retransmit via
 * l2cap_ertm_send().
 * NOTE(review): declaration of ret and the final 'return ret;' were lost
 * in extraction. */
static int l2cap_retransmit_frames(struct l2cap_chan *chan)
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = chan->tx_q.next;

	chan->next_tx_seq = chan->expected_ack_seq;
	ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise try
 * to piggyback the ack on pending data (l2cap_ertm_send); only when no
 * data went out is an explicit RR S-frame sent.
 * NOTE(review): the declaration/initialisation of control and the early
 * 'return's after each send were lost in extraction. */
static void __l2cap_send_ack(struct l2cap_chan *chan)
	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: tell the peer to hold off with RNR. */
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);

	/* Data was sent — it carries the ack, nothing more to do. */
	if (l2cap_ertm_send(chan) > 0)

	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
/* Send an acknowledgement now, cancelling any pending delayed ack. */
static void l2cap_send_ack(struct l2cap_chan *chan)
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence number
 * recorded in the most recent (tail) entry of the SREJ list.
 * NOTE(review): the declaration of control was lost in extraction. */
static void l2cap_send_srejtail(struct l2cap_chan *chan)
	struct srej_list *tail;

	control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
	control |= __set_ctrl_final(chan);

	/* Last element of chan->srej_l. */
	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
	control |= __set_reqseq(chan, tail->tx_seq);

	l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in the head skb and the remainder into MTU-sized
 * continuation fragments chained on frag_list.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the error returns after memcpy_fromiovec(), the IS_ERR
 * check on *frag, the enclosing 'while (len)' loop header, the len/skb
 * bookkeeping lines and 'return 0;' were lost in extraction. */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					struct msghdr *msg, int len,
					int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;

	/* First chunk goes straight into the head skb. */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;

		count = min_t(unsigned int, conn->mtu, len);

		/* Fragment allocation is delegated to the channel owner. */
		*frag = chan->ops->alloc_skb(chan, count,
				msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(*frag);
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		(*frag)->priority = skb->priority;

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header + 2-byte PSM,
 * followed by the user payload copied from @msg.
 * Returns the skb or an ERR_PTR.
 * NOTE(review): the 'u32 priority' parameter, the IS_ERR check after
 * alloc_skb, the kfree_skb on error and 'return skb;' were lost in
 * extraction. */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
					struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	/* Header is basic L2CAP header plus the PSM field. */
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
					msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	/* PSM immediately follows the basic header. */
	put_unaligned(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: basic L2CAP header followed by the
 * user payload copied from @msg.
 * Returns the skb or an ERR_PTR.
 * NOTE(review): the 'u32 priority' parameter, the IS_ERR check after
 * alloc_skb, the kfree_skb on error and 'return skb;' were lost in
 * extraction. */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
					msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: basic header, enhanced or extended
 * control field, optional SDU-length field (@sdulen != 0 for a START
 * fragment), user payload, optional FCS placeholder (filled in at send
 * time).
 * Returns the skb or an ERR_PTR.
 * NOTE(review): the !conn check before returning -ENOTCONN, the 'else'
 * before the enhanced-header size, the 'if (sdulen)' guard, the IS_ERR
 * check after alloc_skb, kfree_skb on error and 'return skb;' were lost in
 * extraction. */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					struct msghdr *msg, size_t len,
					u32 control, u16 sdulen)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

		return ERR_PTR(-ENOTCONN);

	/* Extended (4-byte) vs enhanced (2-byte) control field; second
	 * assignment is the (dropped) 'else' branch. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
		hlen = L2CAP_ENH_HDR_SIZE;

		/* START fragments carry the total SDU length. */
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
					msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	/* Reserve the FCS bytes; the real CRC is written at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START fragment (carrying
 * the SDU length), CONTINUE fragments, and an END fragment; queue them
 * atomically onto tx_q.  Built on a private queue first so a mid-stream
 * allocation failure leaves tx_q untouched.
 * Returns 0 (per the visible flow) or PTR_ERR on failure.
 * NOTE(review): the declarations of control/size/buflen/err, the IS_ERR
 * checks, the 'while (len > 0)' loop header, the per-iteration len/size
 * updates and 'return size;' were lost in extraction. */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;

	skb_queue_head_init(&sar_queue);
	/* START fragment carries the full SDU length (@len as sdulen). */
	control = __set_ctrl_sar(chan, L2CAP_SAR_START);
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

		/* Middle vs final fragment; second branch is the (dropped)
		 * 'else' with L2CAP_SAR_END and buflen = len. */
		if (len > chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
			buflen = chan->remote_mps;
			control = __set_ctrl_sar(chan, L2CAP_SAR_END);

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
			/* Roll back everything queued so far. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);

		__skb_queue_tail(&sar_queue, skb);

	/* Commit the whole SDU to the transmit queue in one go. */
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;
/* Top-level channel send: dispatch @msg according to channel type and
 * mode — connectionless PDU, basic-mode B-frame, or ERTM/streaming
 * I-frames (segmented when the SDU exceeds remote_mps).
 * Returns bytes sent or a negative errno (per the visible flow).
 * NOTE(review): the 'u32 priority' parameter, declarations of err/control,
 * IS_ERR checks, 'return len;' statements, the -EMSGSIZE path for
 * oversized basic-mode SDUs and the default-case return were lost in
 * extraction. */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);

		/* Remote is busy and we still owe an F-bit: defer. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {

		err = l2cap_ertm_send(chan);

		BT_DBG("bad state %1.1x", chan->mode);
/* Copy frame to all raw sockets on that connection */
/* Clone @skb to every RAW-type channel on @conn (e.g. sniffer sockets).
 * NOTE(review): the 'continue' statements, the skb == conn->rx_skb
 * comparison under the "Don't send" comment, the !nskb check and the
 * kfree_skb(nskb) on recv failure were lost in extraction. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the socket it came from */

		nskb = skb_clone(skb, GFP_ATOMIC);

		if (chan->ops->recv(chan->data, nskb))

	mutex_unlock(&conn->chan_lock);
2139 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb: basic L2CAP header directed
 * at the (LE or BR/EDR) signalling CID, a command header, then @dlen bytes
 * of @data — fragmented to conn->mtu with continuation skbs on frag_list.
 * Returns the skb (or NULL per the dropped allocation checks).
 * NOTE(review): declarations of len/count, the 'else' before the BR/EDR
 * CID, cmd->code/cmd->ident assignments, the 'if (dlen)' guard, the
 * fragment while-loop header, allocation-failure cleanup and the final
 * return were lost in extraction. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR (second assignment
	 * is the dropped 'else' branch). */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		/* First chunk of payload that fits in the head skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;

		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type/length, read
 * its value (1/2/4-byte values by copy, anything else by pointer) and
 * advance *ptr past the option.  Returns the total option length.
 * NOTE(review): declaration of len, the *type/*olen assignments, the
 * switch on opt->len with 'break's, the *ptr advance and 'return len;'
 * were lost in extraction. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		/* 1-byte option value */
		*val = *((u8 *) opt->val);

		/* 2-byte option value (unaligned-safe) */
		*val = get_unaligned_le16(opt->val);

		/* 4-byte option value (unaligned-safe) */
		*val = get_unaligned_le32(opt->val);

		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored inline; other lengths treat
 * @val as a pointer to copy from.
 * NOTE(review): the opt->type/opt->len assignments and the switch on @len
 * with its 'break's were lost in extraction. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

		/* 1-byte value */
		*((u8 *) opt->val) = val;

		/* 2-byte value (unaligned-safe) */
		put_unaligned_le16(val, opt->val);

		/* 4-byte value (unaligned-safe) */
		put_unaligned_le32(val, opt->val);

		/* Variable length: @val is a pointer to the source. */
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option for @chan at *ptr, filled
 * according to the channel mode (ERTM gets full local parameters;
 * streaming uses best-effort defaults).
 * NOTE(review): 'break' statements, the streaming-mode id/acc_lat/flush_to
 * assignments, the default case and its early return were lost in
 * extraction. */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);

	case L2CAP_MODE_STREAMING:
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
				(unsigned long) &efs);
/* Delayed-ack timer: send the pending acknowledgement that was not
 * piggybacked on outgoing data in time.
 * NOTE(review): the 'ack_timer.work' tail of the container_of() call was
 * lost in extraction. */
static void l2cap_ack_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	__l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	/* Drop the reference taken when the work was queued. */
	l2cap_chan_put(chan);
/* Initialise the ERTM state machine for @chan: zero all sequence counters,
 * set up the three ERTM delayed works and the SREJ structures, and size
 * the sequence lists from the negotiated windows.
 * Returns 0 or a negative errno from l2cap_seq_list_init().
 * NOTE(review): the declaration of err and the 'if (err < 0) return err;'
 * check after the first l2cap_seq_list_init() were lost in extraction. */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
	/* Local SREJ list sized to our transmit window... */
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);

	/* ...retransmit list sized to the peer's window. */
	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Pick the channel mode to propose: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the switch header on @mode, the 'return mode;' inside the
 * supported case and the fallthrough/default structure were lost in
 * extraction. */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))

		return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled and
 * the remote advertising the extended-window feature bit. */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the extended-flow feature bit. */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the transmit window: switch to the extended control field when
 * the requested window exceeds the default and EWS is supported; otherwise
 * clamp tx_win to the default window.
 * NOTE(review): the 'else' branch structure (last two assignments) was
 * lost in extraction. */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
			__l2cap_ews_supported(chan)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
		/* (dropped 'else'): clamp to the standard window */
		chan->tx_win = min_t(u16, chan->tx_win,
					L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request for @chan into @data: MTU option
 * when non-default, then the mode-specific RFC option plus optional EFS,
 * FCS and EWS options.  Returns the request length (ptr - data) — the
 * return statement itself was lost in extraction.
 * NOTE(review): 'goto done' statements, 'break's, the 'done:' label, the
 * size declaration and several expression tails (e.g. the L2CAP_SDULEN_-
 * SIZE/FCS terms in the PDU-size computation, the EWS value argument)
 * were also dropped. */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_req *req = data;
	/* Start from the channel's current mode; adjusted below. */
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Only select a mode on the first request of a negotiation. */
	if (chan->num_conf_req || chan->num_conf_rsp)

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* (dropped default/fallthrough): downgrade if the remote
		 * does not support the requested mode. */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);

	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Omit the RFC option entirely when the remote supports
		 * neither ERTM nor streaming. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* MPS bounded by the link MTU minus worst-case overhead
		 * (expression tail dropped in extraction). */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
					L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into @data: first pass decodes each option
 * (MTU, flush timeout, RFC, FCS, EFS, EWS), then mode agreement is checked
 * and the output options are generated per mode.  Returns the response
 * length (ptr - data) or -ECONNREFUSED — the return statements themselves
 * were lost in extraction, along with: the declarations of val/size, the
 * switch headers, 'break's, hint handling, 'goto done'/'done:' structure
 * and several expression tails (e.g. the MPS computation). */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored; others must be
		 * answered with CONF_UNKNOWN when unrecognised. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		case L2CAP_CONF_MTU:

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;

		case L2CAP_CONF_QOS:

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);

		case L2CAP_CONF_EWS:
			/* (dropped guard): refuse EWS without HS support */
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;

			/* (dropped default case): echo unknown options */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;

	/* Mode selection happens only on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* (dropped): EFS requested but unsupported locally */
		return -ECONNREFUSED;

	if (chan->mode != rfc.mode)
		return -ECONNREFUSED;

	/* Disagreement on a later pass: unaccept with our mode, unless
	 * we already answered once (then refuse outright). */
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;

		set_bit(CONF_MTU_DONE, &chan->conf_state);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		/* EFS service-type must match ours (or be no-traffic). */
		if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
				efs.stype != L2CAP_SERV_NOTRAFIC &&
				efs.stype != chan->local_stype) {

			result = L2CAP_CONF_UNACCEPT;

			if (chan->num_conf_req >= 1)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						(unsigned long) &efs);

			/* Send PENDING Conf Rsp */
			result = L2CAP_CONF_PENDING;
			set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);

		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);

		case L2CAP_MODE_ERTM:
			/* Without EWS the window comes from the RFC option;
			 * the second assignment is the dropped 'else'. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
					L2CAP_EXT_HDR_SIZE -
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
					L2CAP_EXT_HDR_SIZE -
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			/* (dropped default case): unsupported mode */
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;

	if (result == L2CAP_CONF_SUCCESS)
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response options (@rsp/@len) and build an
 * adjusted Configure Request into @data; *result may be updated (e.g. to
 * UNACCEPT for an undersized MTU).  Returns the request length (ptr -
 * data) or -ECONNREFUSED — the return statement itself was lost in
 * extraction, along with: declarations of type/olen/val, the switch
 * headers, 'break's, the 'else' around the MTU acceptance, ERTM timeout
 * clamping and several argument tails. */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_MTU:
			/* Re-propose the minimum when the peer's MTU is
			 * unacceptably small. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices cannot change mode. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
					rfc.mode != chan->mode)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);

	/* Basic mode cannot be renegotiated away. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);
/* Build a bare Configure Response (scid/result/flags, no options) into
 * @data.  Returns the response length (ptr - data) — the return statement
 * itself was lost in extraction. */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);
/* Complete a previously deferred incoming connection: send the success
 * Connect Response and, if not already done, the first Configure Request.
 * NOTE(review): the 'u8 buf[128]' declaration, the early 'return' after
 * test_and_set_bit and the closing brace were lost in extraction. */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* Reuse the ident of the original Connect Request. */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Configure Request already sent: nothing more to do. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
/* Extract the RFC option from a Configure Response (@rsp/@len) and apply
 * its timeouts/MPS to @chan; when the remote omitted the option, fall back
 * to sane defaults.  Only meaningful for ERTM/streaming channels.
 * NOTE(review): declarations of type/olen/val, the switch headers,
 * 'break's, the 'goto done'/'done:' structure and 'return's were lost in
 * extraction. */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic mode has no RFC parameters to apply. */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);

	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
2893 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2895 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2897 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2900 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2901 cmd
->ident
== conn
->info_ident
) {
2902 cancel_delayed_work(&conn
->info_timer
);
2904 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2905 conn
->info_ident
= 0;
2907 l2cap_conn_start(conn
);
2913 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2915 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2916 struct l2cap_conn_rsp rsp
;
2917 struct l2cap_chan
*chan
= NULL
, *pchan
;
2918 struct sock
*parent
, *sk
= NULL
;
2919 int result
, status
= L2CAP_CS_NO_INFO
;
2921 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2922 __le16 psm
= req
->psm
;
2924 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
2926 /* Check if we have socket listening on psm */
2927 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
2929 result
= L2CAP_CR_BAD_PSM
;
2935 mutex_lock(&conn
->chan_lock
);
2938 /* Check if the ACL is secure enough (if not SDP) */
2939 if (psm
!= cpu_to_le16(0x0001) &&
2940 !hci_conn_check_link_mode(conn
->hcon
)) {
2941 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2942 result
= L2CAP_CR_SEC_BLOCK
;
2946 result
= L2CAP_CR_NO_MEM
;
2948 /* Check for backlog size */
2949 if (sk_acceptq_is_full(parent
)) {
2950 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2954 chan
= pchan
->ops
->new_connection(pchan
->data
);
2960 /* Check if we already have channel with that dcid */
2961 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2962 sock_set_flag(sk
, SOCK_ZAPPED
);
2963 chan
->ops
->close(chan
->data
);
2967 hci_conn_hold(conn
->hcon
);
2969 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2970 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2974 bt_accept_enqueue(parent
, sk
);
2976 __l2cap_chan_add(conn
, chan
);
2980 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2982 chan
->ident
= cmd
->ident
;
2984 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2985 if (l2cap_chan_check_security(chan
)) {
2986 if (bt_sk(sk
)->defer_setup
) {
2987 __l2cap_state_change(chan
, BT_CONNECT2
);
2988 result
= L2CAP_CR_PEND
;
2989 status
= L2CAP_CS_AUTHOR_PEND
;
2990 parent
->sk_data_ready(parent
, 0);
2992 __l2cap_state_change(chan
, BT_CONFIG
);
2993 result
= L2CAP_CR_SUCCESS
;
2994 status
= L2CAP_CS_NO_INFO
;
2997 __l2cap_state_change(chan
, BT_CONNECT2
);
2998 result
= L2CAP_CR_PEND
;
2999 status
= L2CAP_CS_AUTHEN_PEND
;
3002 __l2cap_state_change(chan
, BT_CONNECT2
);
3003 result
= L2CAP_CR_PEND
;
3004 status
= L2CAP_CS_NO_INFO
;
3008 release_sock(parent
);
3009 mutex_unlock(&conn
->chan_lock
);
3012 rsp
.scid
= cpu_to_le16(scid
);
3013 rsp
.dcid
= cpu_to_le16(dcid
);
3014 rsp
.result
= cpu_to_le16(result
);
3015 rsp
.status
= cpu_to_le16(status
);
3016 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3018 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3019 struct l2cap_info_req info
;
3020 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3022 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3023 conn
->info_ident
= l2cap_get_ident(conn
);
3025 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3027 l2cap_send_cmd(conn
, conn
->info_ident
,
3028 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3031 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3032 result
== L2CAP_CR_SUCCESS
) {
3034 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3035 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3036 l2cap_build_conf_req(chan
, buf
), buf
);
3037 chan
->num_conf_req
++;
3043 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3045 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3046 u16 scid
, dcid
, result
, status
;
3047 struct l2cap_chan
*chan
;
3051 scid
= __le16_to_cpu(rsp
->scid
);
3052 dcid
= __le16_to_cpu(rsp
->dcid
);
3053 result
= __le16_to_cpu(rsp
->result
);
3054 status
= __le16_to_cpu(rsp
->status
);
3056 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3057 dcid
, scid
, result
, status
);
3059 mutex_lock(&conn
->chan_lock
);
3062 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3068 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3077 l2cap_chan_lock(chan
);
3080 case L2CAP_CR_SUCCESS
:
3081 l2cap_state_change(chan
, BT_CONFIG
);
3084 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3086 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3089 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3090 l2cap_build_conf_req(chan
, req
), req
);
3091 chan
->num_conf_req
++;
3095 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3099 l2cap_chan_del(chan
, ECONNREFUSED
);
3103 l2cap_chan_unlock(chan
);
3106 mutex_unlock(&conn
->chan_lock
);
3111 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3113 /* FCS is enabled only in ERTM or streaming mode, if one or both
3116 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3117 chan
->fcs
= L2CAP_FCS_NONE
;
3118 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3119 chan
->fcs
= L2CAP_FCS_CRC16
;
3122 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3124 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3127 struct l2cap_chan
*chan
;
3130 dcid
= __le16_to_cpu(req
->dcid
);
3131 flags
= __le16_to_cpu(req
->flags
);
3133 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3135 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3139 l2cap_chan_lock(chan
);
3141 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3142 struct l2cap_cmd_rej_cid rej
;
3144 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3145 rej
.scid
= cpu_to_le16(chan
->scid
);
3146 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3148 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3153 /* Reject if config buffer is too small. */
3154 len
= cmd_len
- sizeof(*req
);
3155 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3156 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3157 l2cap_build_conf_rsp(chan
, rsp
,
3158 L2CAP_CONF_REJECT
, flags
), rsp
);
3163 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3164 chan
->conf_len
+= len
;
3166 if (flags
& 0x0001) {
3167 /* Incomplete config. Send empty response. */
3168 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3169 l2cap_build_conf_rsp(chan
, rsp
,
3170 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3174 /* Complete config. */
3175 len
= l2cap_parse_conf_req(chan
, rsp
);
3177 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3181 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3182 chan
->num_conf_rsp
++;
3184 /* Reset config buffer. */
3187 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3190 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3191 set_default_fcs(chan
);
3193 l2cap_state_change(chan
, BT_CONNECTED
);
3195 chan
->next_tx_seq
= 0;
3196 chan
->expected_tx_seq
= 0;
3197 skb_queue_head_init(&chan
->tx_q
);
3198 if (chan
->mode
== L2CAP_MODE_ERTM
)
3199 err
= l2cap_ertm_init(chan
);
3202 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3204 l2cap_chan_ready(chan
);
3209 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3211 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3212 l2cap_build_conf_req(chan
, buf
), buf
);
3213 chan
->num_conf_req
++;
3216 /* Got Conf Rsp PENDING from remote side and asume we sent
3217 Conf Rsp PENDING in the code above */
3218 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3219 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3221 /* check compatibility */
3223 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3224 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3226 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3227 l2cap_build_conf_rsp(chan
, rsp
,
3228 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3232 l2cap_chan_unlock(chan
);
3236 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3238 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3239 u16 scid
, flags
, result
;
3240 struct l2cap_chan
*chan
;
3241 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3244 scid
= __le16_to_cpu(rsp
->scid
);
3245 flags
= __le16_to_cpu(rsp
->flags
);
3246 result
= __le16_to_cpu(rsp
->result
);
3248 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3251 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3255 l2cap_chan_lock(chan
);
3258 case L2CAP_CONF_SUCCESS
:
3259 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3260 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3263 case L2CAP_CONF_PENDING
:
3264 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3266 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3269 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3272 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3276 /* check compatibility */
3278 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3279 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3281 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3282 l2cap_build_conf_rsp(chan
, buf
,
3283 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3287 case L2CAP_CONF_UNACCEPT
:
3288 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3291 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3292 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3296 /* throw out any old stored conf requests */
3297 result
= L2CAP_CONF_SUCCESS
;
3298 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3301 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3305 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3306 L2CAP_CONF_REQ
, len
, req
);
3307 chan
->num_conf_req
++;
3308 if (result
!= L2CAP_CONF_SUCCESS
)
3314 l2cap_chan_set_err(chan
, ECONNRESET
);
3316 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3317 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3324 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3326 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3327 set_default_fcs(chan
);
3329 l2cap_state_change(chan
, BT_CONNECTED
);
3330 chan
->next_tx_seq
= 0;
3331 chan
->expected_tx_seq
= 0;
3332 skb_queue_head_init(&chan
->tx_q
);
3333 if (chan
->mode
== L2CAP_MODE_ERTM
)
3334 err
= l2cap_ertm_init(chan
);
3337 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3339 l2cap_chan_ready(chan
);
3343 l2cap_chan_unlock(chan
);
3347 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3349 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3350 struct l2cap_disconn_rsp rsp
;
3352 struct l2cap_chan
*chan
;
3355 scid
= __le16_to_cpu(req
->scid
);
3356 dcid
= __le16_to_cpu(req
->dcid
);
3358 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3360 mutex_lock(&conn
->chan_lock
);
3362 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3364 mutex_unlock(&conn
->chan_lock
);
3368 l2cap_chan_lock(chan
);
3372 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3373 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3374 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3377 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3380 l2cap_chan_del(chan
, ECONNRESET
);
3382 l2cap_chan_unlock(chan
);
3384 chan
->ops
->close(chan
->data
);
3386 mutex_unlock(&conn
->chan_lock
);
3391 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3393 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3395 struct l2cap_chan
*chan
;
3397 scid
= __le16_to_cpu(rsp
->scid
);
3398 dcid
= __le16_to_cpu(rsp
->dcid
);
3400 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3402 mutex_lock(&conn
->chan_lock
);
3404 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3406 mutex_unlock(&conn
->chan_lock
);
3410 l2cap_chan_lock(chan
);
3412 l2cap_chan_del(chan
, 0);
3414 l2cap_chan_unlock(chan
);
3416 chan
->ops
->close(chan
->data
);
3418 mutex_unlock(&conn
->chan_lock
);
3423 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3425 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3428 type
= __le16_to_cpu(req
->type
);
3430 BT_DBG("type 0x%4.4x", type
);
3432 if (type
== L2CAP_IT_FEAT_MASK
) {
3434 u32 feat_mask
= l2cap_feat_mask
;
3435 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3436 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3437 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3439 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3442 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3443 | L2CAP_FEAT_EXT_WINDOW
;
3445 put_unaligned_le32(feat_mask
, rsp
->data
);
3446 l2cap_send_cmd(conn
, cmd
->ident
,
3447 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3448 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3450 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3453 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3455 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3457 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3458 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3459 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3460 l2cap_send_cmd(conn
, cmd
->ident
,
3461 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3463 struct l2cap_info_rsp rsp
;
3464 rsp
.type
= cpu_to_le16(type
);
3465 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3466 l2cap_send_cmd(conn
, cmd
->ident
,
3467 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3473 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3475 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3478 type
= __le16_to_cpu(rsp
->type
);
3479 result
= __le16_to_cpu(rsp
->result
);
3481 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3483 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3484 if (cmd
->ident
!= conn
->info_ident
||
3485 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3488 cancel_delayed_work(&conn
->info_timer
);
3490 if (result
!= L2CAP_IR_SUCCESS
) {
3491 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3492 conn
->info_ident
= 0;
3494 l2cap_conn_start(conn
);
3500 case L2CAP_IT_FEAT_MASK
:
3501 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3503 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3504 struct l2cap_info_req req
;
3505 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3507 conn
->info_ident
= l2cap_get_ident(conn
);
3509 l2cap_send_cmd(conn
, conn
->info_ident
,
3510 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3512 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3513 conn
->info_ident
= 0;
3515 l2cap_conn_start(conn
);
3519 case L2CAP_IT_FIXED_CHAN
:
3520 conn
->fixed_chan_mask
= rsp
->data
[0];
3521 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3522 conn
->info_ident
= 0;
3524 l2cap_conn_start(conn
);
3531 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3532 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3535 struct l2cap_create_chan_req
*req
= data
;
3536 struct l2cap_create_chan_rsp rsp
;
3539 if (cmd_len
!= sizeof(*req
))
3545 psm
= le16_to_cpu(req
->psm
);
3546 scid
= le16_to_cpu(req
->scid
);
3548 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3550 /* Placeholder: Always reject */
3552 rsp
.scid
= cpu_to_le16(scid
);
3553 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3554 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3556 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload as a
 * Connection Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3570 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3571 u16 icid
, u16 result
)
3573 struct l2cap_move_chan_rsp rsp
;
3575 BT_DBG("icid %d, result %d", icid
, result
);
3577 rsp
.icid
= cpu_to_le16(icid
);
3578 rsp
.result
= cpu_to_le16(result
);
3580 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3583 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3584 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3586 struct l2cap_move_chan_cfm cfm
;
3589 BT_DBG("icid %d, result %d", icid
, result
);
3591 ident
= l2cap_get_ident(conn
);
3593 chan
->ident
= ident
;
3595 cfm
.icid
= cpu_to_le16(icid
);
3596 cfm
.result
= cpu_to_le16(result
);
3598 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3601 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3604 struct l2cap_move_chan_cfm_rsp rsp
;
3606 BT_DBG("icid %d", icid
);
3608 rsp
.icid
= cpu_to_le16(icid
);
3609 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3612 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3613 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3615 struct l2cap_move_chan_req
*req
= data
;
3617 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3619 if (cmd_len
!= sizeof(*req
))
3622 icid
= le16_to_cpu(req
->icid
);
3624 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3629 /* Placeholder: Always refuse */
3630 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3635 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3636 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3638 struct l2cap_move_chan_rsp
*rsp
= data
;
3641 if (cmd_len
!= sizeof(*rsp
))
3644 icid
= le16_to_cpu(rsp
->icid
);
3645 result
= le16_to_cpu(rsp
->result
);
3647 BT_DBG("icid %d, result %d", icid
, result
);
3649 /* Placeholder: Always unconfirmed */
3650 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3655 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3656 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3658 struct l2cap_move_chan_cfm
*cfm
= data
;
3661 if (cmd_len
!= sizeof(*cfm
))
3664 icid
= le16_to_cpu(cfm
->icid
);
3665 result
= le16_to_cpu(cfm
->result
);
3667 BT_DBG("icid %d, result %d", icid
, result
);
3669 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3674 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3675 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3677 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3680 if (cmd_len
!= sizeof(*rsp
))
3683 icid
= le16_to_cpu(rsp
->icid
);
3685 BT_DBG("icid %d", icid
);
3690 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3695 if (min
> max
|| min
< 6 || max
> 3200)
3698 if (to_multiplier
< 10 || to_multiplier
> 3200)
3701 if (max
>= to_multiplier
* 8)
3704 max_latency
= (to_multiplier
* 8 / max
) - 1;
3705 if (latency
> 499 || latency
> max_latency
)
3711 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3712 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3714 struct hci_conn
*hcon
= conn
->hcon
;
3715 struct l2cap_conn_param_update_req
*req
;
3716 struct l2cap_conn_param_update_rsp rsp
;
3717 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3720 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3723 cmd_len
= __le16_to_cpu(cmd
->len
);
3724 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3727 req
= (struct l2cap_conn_param_update_req
*) data
;
3728 min
= __le16_to_cpu(req
->min
);
3729 max
= __le16_to_cpu(req
->max
);
3730 latency
= __le16_to_cpu(req
->latency
);
3731 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3733 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3734 min
, max
, latency
, to_multiplier
);
3736 memset(&rsp
, 0, sizeof(rsp
));
3738 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3740 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3742 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3744 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3748 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3753 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3754 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3758 switch (cmd
->code
) {
3759 case L2CAP_COMMAND_REJ
:
3760 l2cap_command_rej(conn
, cmd
, data
);
3763 case L2CAP_CONN_REQ
:
3764 err
= l2cap_connect_req(conn
, cmd
, data
);
3767 case L2CAP_CONN_RSP
:
3768 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3771 case L2CAP_CONF_REQ
:
3772 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3775 case L2CAP_CONF_RSP
:
3776 err
= l2cap_config_rsp(conn
, cmd
, data
);
3779 case L2CAP_DISCONN_REQ
:
3780 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3783 case L2CAP_DISCONN_RSP
:
3784 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3787 case L2CAP_ECHO_REQ
:
3788 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3791 case L2CAP_ECHO_RSP
:
3794 case L2CAP_INFO_REQ
:
3795 err
= l2cap_information_req(conn
, cmd
, data
);
3798 case L2CAP_INFO_RSP
:
3799 err
= l2cap_information_rsp(conn
, cmd
, data
);
3802 case L2CAP_CREATE_CHAN_REQ
:
3803 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3806 case L2CAP_CREATE_CHAN_RSP
:
3807 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3810 case L2CAP_MOVE_CHAN_REQ
:
3811 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3814 case L2CAP_MOVE_CHAN_RSP
:
3815 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3818 case L2CAP_MOVE_CHAN_CFM
:
3819 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3822 case L2CAP_MOVE_CHAN_CFM_RSP
:
3823 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3827 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3835 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3836 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3838 switch (cmd
->code
) {
3839 case L2CAP_COMMAND_REJ
:
3842 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3843 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3845 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3849 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3854 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3855 struct sk_buff
*skb
)
3857 u8
*data
= skb
->data
;
3859 struct l2cap_cmd_hdr cmd
;
3862 l2cap_raw_recv(conn
, skb
);
3864 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3866 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3867 data
+= L2CAP_CMD_HDR_SIZE
;
3868 len
-= L2CAP_CMD_HDR_SIZE
;
3870 cmd_len
= le16_to_cpu(cmd
.len
);
3872 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3874 if (cmd_len
> len
|| !cmd
.ident
) {
3875 BT_DBG("corrupted command");
3879 if (conn
->hcon
->type
== LE_LINK
)
3880 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3882 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3885 struct l2cap_cmd_rej_unk rej
;
3887 BT_ERR("Wrong link type (%d)", err
);
3889 /* FIXME: Map err to a valid reason */
3890 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3891 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3901 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3903 u16 our_fcs
, rcv_fcs
;
3906 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3907 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3909 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3911 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3912 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3913 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3914 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3916 if (our_fcs
!= rcv_fcs
)
3922 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3926 chan
->frames_sent
= 0;
3928 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3930 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3931 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3932 l2cap_send_sframe(chan
, control
);
3933 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3936 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3937 l2cap_retransmit_frames(chan
);
3939 l2cap_ertm_send(chan
);
3941 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3942 chan
->frames_sent
== 0) {
3943 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3944 l2cap_send_sframe(chan
, control
);
3948 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3950 struct sk_buff
*next_skb
;
3951 int tx_seq_offset
, next_tx_seq_offset
;
3953 bt_cb(skb
)->tx_seq
= tx_seq
;
3954 bt_cb(skb
)->sar
= sar
;
3956 next_skb
= skb_peek(&chan
->srej_q
);
3958 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3961 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3964 next_tx_seq_offset
= __seq_offset(chan
,
3965 bt_cb(next_skb
)->tx_seq
, chan
->buffer_seq
);
3967 if (next_tx_seq_offset
> tx_seq_offset
) {
3968 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3972 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3975 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
3978 __skb_queue_tail(&chan
->srej_q
, skb
);
3983 static void append_skb_frag(struct sk_buff
*skb
,
3984 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3986 /* skb->len reflects data in skb as well as all fragments
3987 * skb->data_len reflects only data in fragments
3989 if (!skb_has_frag_list(skb
))
3990 skb_shinfo(skb
)->frag_list
= new_frag
;
3992 new_frag
->next
= NULL
;
3994 (*last_frag
)->next
= new_frag
;
3995 *last_frag
= new_frag
;
3997 skb
->len
+= new_frag
->len
;
3998 skb
->data_len
+= new_frag
->len
;
3999 skb
->truesize
+= new_frag
->truesize
;
4002 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
4006 switch (__get_ctrl_sar(chan
, control
)) {
4007 case L2CAP_SAR_UNSEGMENTED
:
4011 err
= chan
->ops
->recv(chan
->data
, skb
);
4014 case L2CAP_SAR_START
:
4018 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4019 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4021 if (chan
->sdu_len
> chan
->imtu
) {
4026 if (skb
->len
>= chan
->sdu_len
)
4030 chan
->sdu_last_frag
= skb
;
4036 case L2CAP_SAR_CONTINUE
:
4040 append_skb_frag(chan
->sdu
, skb
,
4041 &chan
->sdu_last_frag
);
4044 if (chan
->sdu
->len
>= chan
->sdu_len
)
4054 append_skb_frag(chan
->sdu
, skb
,
4055 &chan
->sdu_last_frag
);
4058 if (chan
->sdu
->len
!= chan
->sdu_len
)
4061 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4064 /* Reassembly complete */
4066 chan
->sdu_last_frag
= NULL
;
4074 kfree_skb(chan
->sdu
);
4076 chan
->sdu_last_frag
= NULL
;
4083 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
4085 BT_DBG("chan %p, Enter local busy", chan
);
4087 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4088 l2cap_seq_list_clear(&chan
->srej_list
);
4090 __set_ack_timer(chan
);
4093 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
4097 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4100 control
= __set_reqseq(chan
, chan
->buffer_seq
);
4101 control
|= __set_ctrl_poll(chan
);
4102 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4103 l2cap_send_sframe(chan
, control
);
4104 chan
->retry_count
= 1;
4106 __clear_retrans_timer(chan
);
4107 __set_monitor_timer(chan
);
4109 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
4112 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4113 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4115 BT_DBG("chan %p, Exit local busy", chan
);
4118 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4120 if (chan
->mode
== L2CAP_MODE_ERTM
) {
4122 l2cap_ertm_enter_local_busy(chan
);
4124 l2cap_ertm_exit_local_busy(chan
);
4128 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
4130 struct sk_buff
*skb
;
4133 while ((skb
= skb_peek(&chan
->srej_q
)) &&
4134 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4137 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
4140 skb
= skb_dequeue(&chan
->srej_q
);
4141 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
4142 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4145 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4149 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
4150 tx_seq
= __next_seq(chan
, tx_seq
);
4154 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4156 struct srej_list
*l
, *tmp
;
4159 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
4160 if (l
->tx_seq
== tx_seq
) {
4165 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4166 control
|= __set_reqseq(chan
, l
->tx_seq
);
4167 l2cap_send_sframe(chan
, control
);
4169 list_add_tail(&l
->list
, &chan
->srej_l
);
4173 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4175 struct srej_list
*new;
4178 while (tx_seq
!= chan
->expected_tx_seq
) {
4179 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4180 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
4181 l2cap_seq_list_append(&chan
->srej_list
, chan
->expected_tx_seq
);
4182 l2cap_send_sframe(chan
, control
);
4184 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
4188 new->tx_seq
= chan
->expected_tx_seq
;
4190 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4192 list_add_tail(&new->list
, &chan
->srej_l
);
4195 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4200 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4202 u16 tx_seq
= __get_txseq(chan
, rx_control
);
4203 u16 req_seq
= __get_reqseq(chan
, rx_control
);
4204 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
4205 int tx_seq_offset
, expected_tx_seq_offset
;
4206 int num_to_ack
= (chan
->tx_win
/6) + 1;
4209 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
4210 tx_seq
, rx_control
);
4212 if (__is_ctrl_final(chan
, rx_control
) &&
4213 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4214 __clear_monitor_timer(chan
);
4215 if (chan
->unacked_frames
> 0)
4216 __set_retrans_timer(chan
);
4217 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4220 chan
->expected_ack_seq
= req_seq
;
4221 l2cap_drop_acked_frames(chan
);
4223 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4225 /* invalid tx_seq */
4226 if (tx_seq_offset
>= chan
->tx_win
) {
4227 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4231 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4232 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4233 l2cap_send_ack(chan
);
4237 if (tx_seq
== chan
->expected_tx_seq
)
4240 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4241 struct srej_list
*first
;
4243 first
= list_first_entry(&chan
->srej_l
,
4244 struct srej_list
, list
);
4245 if (tx_seq
== first
->tx_seq
) {
4246 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4247 l2cap_check_srej_gap(chan
, tx_seq
);
4249 list_del(&first
->list
);
4252 if (list_empty(&chan
->srej_l
)) {
4253 chan
->buffer_seq
= chan
->buffer_seq_srej
;
4254 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4255 l2cap_send_ack(chan
);
4256 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
4259 struct srej_list
*l
;
4261 /* duplicated tx_seq */
4262 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
4265 list_for_each_entry(l
, &chan
->srej_l
, list
) {
4266 if (l
->tx_seq
== tx_seq
) {
4267 l2cap_resend_srejframe(chan
, tx_seq
);
4272 err
= l2cap_send_srejframe(chan
, tx_seq
);
4274 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4279 expected_tx_seq_offset
= __seq_offset(chan
,
4280 chan
->expected_tx_seq
, chan
->buffer_seq
);
4282 /* duplicated tx_seq */
4283 if (tx_seq_offset
< expected_tx_seq_offset
)
4286 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4288 BT_DBG("chan %p, Enter SREJ", chan
);
4290 INIT_LIST_HEAD(&chan
->srej_l
);
4291 chan
->buffer_seq_srej
= chan
->buffer_seq
;
4293 __skb_queue_head_init(&chan
->srej_q
);
4294 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4296 /* Set P-bit only if there are some I-frames to ack. */
4297 if (__clear_ack_timer(chan
))
4298 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
4300 err
= l2cap_send_srejframe(chan
, tx_seq
);
4302 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4309 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4311 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4312 bt_cb(skb
)->tx_seq
= tx_seq
;
4313 bt_cb(skb
)->sar
= sar
;
4314 __skb_queue_tail(&chan
->srej_q
, skb
);
4318 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
4319 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4322 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4326 if (__is_ctrl_final(chan
, rx_control
)) {
4327 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4328 l2cap_retransmit_frames(chan
);
4332 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
4333 if (chan
->num_acked
== num_to_ack
- 1)
4334 l2cap_send_ack(chan
);
4336 __set_ack_timer(chan
);
4345 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4347 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
4348 __get_reqseq(chan
, rx_control
), rx_control
);
4350 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
4351 l2cap_drop_acked_frames(chan
);
4353 if (__is_ctrl_poll(chan
, rx_control
)) {
4354 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4355 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4356 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4357 (chan
->unacked_frames
> 0))
4358 __set_retrans_timer(chan
);
4360 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4361 l2cap_send_srejtail(chan
);
4363 l2cap_send_i_or_rr_or_rnr(chan
);
4366 } else if (__is_ctrl_final(chan
, rx_control
)) {
4367 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4369 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4370 l2cap_retransmit_frames(chan
);
4373 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4374 (chan
->unacked_frames
> 0))
4375 __set_retrans_timer(chan
);
4377 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4378 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4379 l2cap_send_ack(chan
);
4381 l2cap_ertm_send(chan
);
4385 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4387 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4389 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4391 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4393 chan
->expected_ack_seq
= tx_seq
;
4394 l2cap_drop_acked_frames(chan
);
4396 if (__is_ctrl_final(chan
, rx_control
)) {
4397 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4398 l2cap_retransmit_frames(chan
);
4400 l2cap_retransmit_frames(chan
);
4402 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4403 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4406 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4408 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4410 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4412 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4414 if (__is_ctrl_poll(chan
, rx_control
)) {
4415 chan
->expected_ack_seq
= tx_seq
;
4416 l2cap_drop_acked_frames(chan
);
4418 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4419 l2cap_retransmit_one_frame(chan
, tx_seq
);
4421 l2cap_ertm_send(chan
);
4423 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4424 chan
->srej_save_reqseq
= tx_seq
;
4425 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4427 } else if (__is_ctrl_final(chan
, rx_control
)) {
4428 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4429 chan
->srej_save_reqseq
== tx_seq
)
4430 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4432 l2cap_retransmit_one_frame(chan
, tx_seq
);
4434 l2cap_retransmit_one_frame(chan
, tx_seq
);
4435 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4436 chan
->srej_save_reqseq
= tx_seq
;
4437 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4442 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4444 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4446 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4448 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4449 chan
->expected_ack_seq
= tx_seq
;
4450 l2cap_drop_acked_frames(chan
);
4452 if (__is_ctrl_poll(chan
, rx_control
))
4453 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4455 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4456 __clear_retrans_timer(chan
);
4457 if (__is_ctrl_poll(chan
, rx_control
))
4458 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4462 if (__is_ctrl_poll(chan
, rx_control
)) {
4463 l2cap_send_srejtail(chan
);
4465 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4466 l2cap_send_sframe(chan
, rx_control
);
4470 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4472 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4474 if (__is_ctrl_final(chan
, rx_control
) &&
4475 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4476 __clear_monitor_timer(chan
);
4477 if (chan
->unacked_frames
> 0)
4478 __set_retrans_timer(chan
);
4479 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4482 switch (__get_ctrl_super(chan
, rx_control
)) {
4483 case L2CAP_SUPER_RR
:
4484 l2cap_data_channel_rrframe(chan
, rx_control
);
4487 case L2CAP_SUPER_REJ
:
4488 l2cap_data_channel_rejframe(chan
, rx_control
);
4491 case L2CAP_SUPER_SREJ
:
4492 l2cap_data_channel_srejframe(chan
, rx_control
);
4495 case L2CAP_SUPER_RNR
:
4496 l2cap_data_channel_rnrframe(chan
, rx_control
);
4504 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4508 int len
, next_tx_seq_offset
, req_seq_offset
;
4510 __unpack_control(chan
, skb
);
4512 control
= __get_control(chan
, skb
->data
);
4513 skb_pull(skb
, __ctrl_size(chan
));
4517 * We can just drop the corrupted I-frame here.
4518 * Receiver will miss it and start proper recovery
4519 * procedures and ask retransmission.
4521 if (l2cap_check_fcs(chan
, skb
))
4524 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4525 len
-= L2CAP_SDULEN_SIZE
;
4527 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4528 len
-= L2CAP_FCS_SIZE
;
4530 if (len
> chan
->mps
) {
4531 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4535 req_seq
= __get_reqseq(chan
, control
);
4537 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4539 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4540 chan
->expected_ack_seq
);
4542 /* check for invalid req-seq */
4543 if (req_seq_offset
> next_tx_seq_offset
) {
4544 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4548 if (!__is_sframe(chan
, control
)) {
4550 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4554 l2cap_data_channel_iframe(chan
, control
, skb
);
4558 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4562 l2cap_data_channel_sframe(chan
, control
, skb
);
4572 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4574 struct l2cap_chan
*chan
;
4579 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4581 BT_DBG("unknown cid 0x%4.4x", cid
);
4582 /* Drop packet and return */
4587 l2cap_chan_lock(chan
);
4589 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4591 if (chan
->state
!= BT_CONNECTED
)
4594 switch (chan
->mode
) {
4595 case L2CAP_MODE_BASIC
:
4596 /* If socket recv buffers overflows we drop data here
4597 * which is *bad* because L2CAP has to be reliable.
4598 * But we don't have any other choice. L2CAP doesn't
4599 * provide flow control mechanism. */
4601 if (chan
->imtu
< skb
->len
)
4604 if (!chan
->ops
->recv(chan
->data
, skb
))
4608 case L2CAP_MODE_ERTM
:
4609 l2cap_ertm_data_rcv(chan
, skb
);
4613 case L2CAP_MODE_STREAMING
:
4614 control
= __get_control(chan
, skb
->data
);
4615 skb_pull(skb
, __ctrl_size(chan
));
4618 if (l2cap_check_fcs(chan
, skb
))
4621 if (__is_sar_start(chan
, control
))
4622 len
-= L2CAP_SDULEN_SIZE
;
4624 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4625 len
-= L2CAP_FCS_SIZE
;
4627 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4630 tx_seq
= __get_txseq(chan
, control
);
4632 if (chan
->expected_tx_seq
!= tx_seq
) {
4633 /* Frame(s) missing - must discard partial SDU */
4634 kfree_skb(chan
->sdu
);
4636 chan
->sdu_last_frag
= NULL
;
4639 /* TODO: Notify userland of missing data */
4642 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4644 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4645 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4650 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4658 l2cap_chan_unlock(chan
);
4663 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4665 struct l2cap_chan
*chan
;
4667 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
4671 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4673 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4676 if (chan
->imtu
< skb
->len
)
4679 if (!chan
->ops
->recv(chan
->data
, skb
))
4688 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
4689 struct sk_buff
*skb
)
4691 struct l2cap_chan
*chan
;
4693 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
4697 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4699 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4702 if (chan
->imtu
< skb
->len
)
4705 if (!chan
->ops
->recv(chan
->data
, skb
))
4714 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4716 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4720 skb_pull(skb
, L2CAP_HDR_SIZE
);
4721 cid
= __le16_to_cpu(lh
->cid
);
4722 len
= __le16_to_cpu(lh
->len
);
4724 if (len
!= skb
->len
) {
4729 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4732 case L2CAP_CID_LE_SIGNALING
:
4733 case L2CAP_CID_SIGNALING
:
4734 l2cap_sig_channel(conn
, skb
);
4737 case L2CAP_CID_CONN_LESS
:
4738 psm
= get_unaligned((__le16
*) skb
->data
);
4740 l2cap_conless_channel(conn
, psm
, skb
);
4743 case L2CAP_CID_LE_DATA
:
4744 l2cap_att_channel(conn
, cid
, skb
);
4748 if (smp_sig_channel(conn
, skb
))
4749 l2cap_conn_del(conn
->hcon
, EACCES
);
4753 l2cap_data_channel(conn
, cid
, skb
);
4758 /* ---- L2CAP interface with lower layer (HCI) ---- */
4760 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4762 int exact
= 0, lm1
= 0, lm2
= 0;
4763 struct l2cap_chan
*c
;
4765 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4767 /* Find listening sockets and check their link_mode */
4768 read_lock(&chan_list_lock
);
4769 list_for_each_entry(c
, &chan_list
, global_l
) {
4770 struct sock
*sk
= c
->sk
;
4772 if (c
->state
!= BT_LISTEN
)
4775 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4776 lm1
|= HCI_LM_ACCEPT
;
4777 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4778 lm1
|= HCI_LM_MASTER
;
4780 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4781 lm2
|= HCI_LM_ACCEPT
;
4782 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4783 lm2
|= HCI_LM_MASTER
;
4786 read_unlock(&chan_list_lock
);
4788 return exact
? lm1
: lm2
;
4791 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4793 struct l2cap_conn
*conn
;
4795 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4798 conn
= l2cap_conn_add(hcon
, status
);
4800 l2cap_conn_ready(conn
);
4802 l2cap_conn_del(hcon
, bt_to_errno(status
));
4807 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4809 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4811 BT_DBG("hcon %p", hcon
);
4814 return HCI_ERROR_REMOTE_USER_TERM
;
4815 return conn
->disc_reason
;
4818 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4820 BT_DBG("hcon %p reason %d", hcon
, reason
);
4822 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4826 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4828 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4831 if (encrypt
== 0x00) {
4832 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4833 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
4834 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4835 l2cap_chan_close(chan
, ECONNREFUSED
);
4837 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4838 __clear_chan_timer(chan
);
4842 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4844 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4845 struct l2cap_chan
*chan
;
4850 BT_DBG("conn %p", conn
);
4852 if (hcon
->type
== LE_LINK
) {
4853 if (!status
&& encrypt
)
4854 smp_distribute_keys(conn
, 0);
4855 cancel_delayed_work(&conn
->security_timer
);
4858 mutex_lock(&conn
->chan_lock
);
4860 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4861 l2cap_chan_lock(chan
);
4863 BT_DBG("chan->scid %d", chan
->scid
);
4865 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4866 if (!status
&& encrypt
) {
4867 chan
->sec_level
= hcon
->sec_level
;
4868 l2cap_chan_ready(chan
);
4871 l2cap_chan_unlock(chan
);
4875 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4876 l2cap_chan_unlock(chan
);
4880 if (!status
&& (chan
->state
== BT_CONNECTED
||
4881 chan
->state
== BT_CONFIG
)) {
4882 l2cap_check_encryption(chan
, encrypt
);
4883 l2cap_chan_unlock(chan
);
4887 if (chan
->state
== BT_CONNECT
) {
4889 l2cap_send_conn_req(chan
);
4891 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4893 } else if (chan
->state
== BT_CONNECT2
) {
4894 struct sock
*sk
= chan
->sk
;
4895 struct l2cap_conn_rsp rsp
;
4901 if (bt_sk(sk
)->defer_setup
) {
4902 struct sock
*parent
= bt_sk(sk
)->parent
;
4903 res
= L2CAP_CR_PEND
;
4904 stat
= L2CAP_CS_AUTHOR_PEND
;
4906 parent
->sk_data_ready(parent
, 0);
4908 __l2cap_state_change(chan
, BT_CONFIG
);
4909 res
= L2CAP_CR_SUCCESS
;
4910 stat
= L2CAP_CS_NO_INFO
;
4913 __l2cap_state_change(chan
, BT_DISCONN
);
4914 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4915 res
= L2CAP_CR_SEC_BLOCK
;
4916 stat
= L2CAP_CS_NO_INFO
;
4921 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4922 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4923 rsp
.result
= cpu_to_le16(res
);
4924 rsp
.status
= cpu_to_le16(stat
);
4925 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4929 l2cap_chan_unlock(chan
);
4932 mutex_unlock(&conn
->chan_lock
);
4937 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4939 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4942 conn
= l2cap_conn_add(hcon
, 0);
4947 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4949 if (!(flags
& ACL_CONT
)) {
4950 struct l2cap_hdr
*hdr
;
4951 struct l2cap_chan
*chan
;
4956 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4957 kfree_skb(conn
->rx_skb
);
4958 conn
->rx_skb
= NULL
;
4960 l2cap_conn_unreliable(conn
, ECOMM
);
4963 /* Start fragment always begin with Basic L2CAP header */
4964 if (skb
->len
< L2CAP_HDR_SIZE
) {
4965 BT_ERR("Frame is too short (len %d)", skb
->len
);
4966 l2cap_conn_unreliable(conn
, ECOMM
);
4970 hdr
= (struct l2cap_hdr
*) skb
->data
;
4971 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4972 cid
= __le16_to_cpu(hdr
->cid
);
4974 if (len
== skb
->len
) {
4975 /* Complete frame received */
4976 l2cap_recv_frame(conn
, skb
);
4980 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4982 if (skb
->len
> len
) {
4983 BT_ERR("Frame is too long (len %d, expected len %d)",
4985 l2cap_conn_unreliable(conn
, ECOMM
);
4989 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4991 if (chan
&& chan
->sk
) {
4992 struct sock
*sk
= chan
->sk
;
4995 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4996 BT_ERR("Frame exceeding recv MTU (len %d, "
5000 l2cap_conn_unreliable(conn
, ECOMM
);
5006 /* Allocate skb for the complete frame (with header) */
5007 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5011 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5013 conn
->rx_len
= len
- skb
->len
;
5015 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5017 if (!conn
->rx_len
) {
5018 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5019 l2cap_conn_unreliable(conn
, ECOMM
);
5023 if (skb
->len
> conn
->rx_len
) {
5024 BT_ERR("Fragment is too long (len %d, expected %d)",
5025 skb
->len
, conn
->rx_len
);
5026 kfree_skb(conn
->rx_skb
);
5027 conn
->rx_skb
= NULL
;
5029 l2cap_conn_unreliable(conn
, ECOMM
);
5033 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5035 conn
->rx_len
-= skb
->len
;
5037 if (!conn
->rx_len
) {
5038 /* Complete frame received */
5039 l2cap_recv_frame(conn
, conn
->rx_skb
);
5040 conn
->rx_skb
= NULL
;
5049 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5051 struct l2cap_chan
*c
;
5053 read_lock(&chan_list_lock
);
5055 list_for_each_entry(c
, &chan_list
, global_l
) {
5056 struct sock
*sk
= c
->sk
;
5058 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5059 batostr(&bt_sk(sk
)->src
),
5060 batostr(&bt_sk(sk
)->dst
),
5061 c
->state
, __le16_to_cpu(c
->psm
),
5062 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5063 c
->sec_level
, c
->mode
);
5066 read_unlock(&chan_list_lock
);
5071 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5073 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5076 static const struct file_operations l2cap_debugfs_fops
= {
5077 .open
= l2cap_debugfs_open
,
5079 .llseek
= seq_lseek
,
5080 .release
= single_release
,
5083 static struct dentry
*l2cap_debugfs
;
5085 int __init
l2cap_init(void)
5089 err
= l2cap_init_sockets();
5094 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5095 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5097 BT_ERR("Failed to create L2CAP debug file");
5103 void l2cap_exit(void)
5105 debugfs_remove(l2cap_debugfs
);
5106 l2cap_cleanup_sockets();
5109 module_param(disable_ertm
, bool, 0644);
5110 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");